From 7371adb63196fea4332eaaf7b24833408c64a611 Mon Sep 17 00:00:00 2001
From: Vineeth Bandi
Date: Wed, 15 Jan 2025 09:15:54 -0600
Subject: [PATCH] patch kubernetes/kubernetes for 1.26 and 1.27

---
 projects/kubernetes/kubernetes/1-26/CHECKSUMS |     38 +-
 .../0017-EKS-PATCH-fix-CVE-2023-47108.patch   | 131067 +++++++++++++++
 .../1-26/patches/0018-skip-x509-tests.patch   |     26 +
 projects/kubernetes/kubernetes/1-27/CHECKSUMS |     38 +-
 .../0016-EKS-PATCH-fix-CVE-2023-47108.patch   | 129046 ++++++++++++++
 5 files changed, 260177 insertions(+), 38 deletions(-)
 create mode 100644 projects/kubernetes/kubernetes/1-26/patches/0017-EKS-PATCH-fix-CVE-2023-47108.patch
 create mode 100644 projects/kubernetes/kubernetes/1-26/patches/0018-skip-x509-tests.patch
 create mode 100644 projects/kubernetes/kubernetes/1-27/patches/0016-EKS-PATCH-fix-CVE-2023-47108.patch

diff --git a/projects/kubernetes/kubernetes/1-26/CHECKSUMS b/projects/kubernetes/kubernetes/1-26/CHECKSUMS
index 06713e01a2..2bb1fea4ed 100644
--- a/projects/kubernetes/kubernetes/1-26/CHECKSUMS
+++ b/projects/kubernetes/kubernetes/1-26/CHECKSUMS
@@ -1,19 +1,19 @@
-c1e38f1c5b629d1ec7bbd5cbb37fbdeb584d61482799cbc4c2b94b85b9209479 _output/1-26/bin/darwin/amd64/kubectl
-b0d6f4a84cd65330f714609733f2aa3a0801e21f4041e3429c1e6a2e99157fff _output/1-26/bin/linux/amd64/kube-apiserver
-99ab6c021ec79edf453e75df4a673b46c6dbefdd12798654be2e9a3a081a7577 _output/1-26/bin/linux/amd64/kube-controller-manager
-eb856212dc3e213017cc2b8472c95166ce674d93d5f49bd91fdc32c8f42cc174 _output/1-26/bin/linux/amd64/kube-proxy
-5347edf196cf8b33a2c46e0fff5eeaf98e8dfe61349c81a0474def18cb51eb8a _output/1-26/bin/linux/amd64/kube-scheduler
-591bb0435d125946d44fe03c676a9735dc8e7e892f1421dd5580a12d4dce3dfe _output/1-26/bin/linux/amd64/kubeadm
-f678cab763b65300739375e3015d484208dd1e7d37cb88a437f7a4967eceba29 _output/1-26/bin/linux/amd64/kubectl
-9318117b2a1a58fa4a1f988146dea7edf214a219d15cb40506ac6f6239109f8f _output/1-26/bin/linux/amd64/kubelet
-d26926eb62718ccf8fa6ab52193c750c77ccad3379fddac8fe7300aef5782070 _output/1-26/bin/linux/arm64/kube-apiserver
-8fd8613f99c70d764b5ef440d782b1741d824e358b3ea1d7c55e0a80483f95c8 _output/1-26/bin/linux/arm64/kube-controller-manager
-94cd67fa641aa54cd35fe5875d1499240d7df8d8993a6767342da16f4d547985 _output/1-26/bin/linux/arm64/kube-proxy
-a4a0f16202cc251eff9cc5b85c80c8dbf25de79a564ffd21c1859d99b81ee4ab _output/1-26/bin/linux/arm64/kube-scheduler
-f09f997c66365343cabe6ef8f914bcb3c2764accee68a82856b65e5d2fc9a18a _output/1-26/bin/linux/arm64/kubeadm
-5cf6afb259fb196bb455ef69cc3134798a3dbf73147a827e849b02733c5cb801 _output/1-26/bin/linux/arm64/kubectl
-33346598c7e28d056e29d550ab4baef5bacb12f75262a0f2d3da011eb1fa1baf _output/1-26/bin/linux/arm64/kubelet
-00b08ba3de26f777162a1969f55b4ba863d62690bf6488f1074cb056ed589df7 _output/1-26/bin/windows/amd64/kube-proxy.exe
-4e1400977e54cd1c7b28b4e886ef8157a23753fe3770876e2504e4e92f69ac7d _output/1-26/bin/windows/amd64/kubeadm.exe
-65b32bd54fa862e3a6b366cf6ddb8eb39e46f38fb0d79687eb31119f2d301994 _output/1-26/bin/windows/amd64/kubectl.exe
-9d16e134e37c7e3634b42e5288a33ffaa87c4839bb3a283478b7da63bde603e2 _output/1-26/bin/windows/amd64/kubelet.exe
+b86fd6b96e7a6b0cc37fd2d558ef0d807d68740a35a0ad37926213d68cac8b49 _output/1-26/bin/darwin/amd64/kubectl
+cacfab5bc4801fd3b2769d9697289297a533ee7c8fcb39eb132438878cd93659 _output/1-26/bin/linux/amd64/kube-apiserver
+6790c933fd7738a720dfb567ed83d323b8e141e3cb672aeead24a0d343c6aa8f _output/1-26/bin/linux/amd64/kube-controller-manager
+b3e6ed1b2adde812d9eeb19062b4ab77772806a602106c08861d52b6bc1ee8ae _output/1-26/bin/linux/amd64/kube-proxy +a7580d839d7551a9e75609c0962e709a359255b7e9d3ae41fd5a68311167bc10 _output/1-26/bin/linux/amd64/kube-scheduler +6ac2422c03acdd62637f46df238304ae1f9c213fff083d5371ae263b4cd872ae _output/1-26/bin/linux/amd64/kubeadm +5b61cf4f1270362ed73ef0bdc546f9614aeec5bb91d73e407b2dbc29a512afac _output/1-26/bin/linux/amd64/kubectl +8313fc9f3c2b0661c0e2d88f9ec1e8b5f31a7bdf27fea732ce8abf202c009e0e _output/1-26/bin/linux/amd64/kubelet +d8daaa5ab82f3783435668d135748743a8a3c3fd90356075a4990eb61e038a24 _output/1-26/bin/linux/arm64/kube-apiserver +fe2112a3389db88ddd74f020892d3bc711d399b74ecd600b0a06b994d768e68c _output/1-26/bin/linux/arm64/kube-controller-manager +b5600246e77b8b05e7fc724b9001906eb08fd8587af9d23e61acc7acfc0bd940 _output/1-26/bin/linux/arm64/kube-proxy +fc2dc525b0c0176c147678daa842b7a8300b263f2be7146bef1c9d1c010853be _output/1-26/bin/linux/arm64/kube-scheduler +3dcaacd09bb404c6d7efe5d328ffd638aec80ad386c6a4eda0178cc521af882e _output/1-26/bin/linux/arm64/kubeadm +599f473d51b5b9cab866dabc28a7536cea845e7c005f5dc82ee3b25a8fc44c9d _output/1-26/bin/linux/arm64/kubectl +aa20d34df2d25a91bd1fb0dbed278e2830eea3a5ae85805fdf37da60249b4811 _output/1-26/bin/linux/arm64/kubelet +705682c18699da71f1216f9b0276e4bfc9ee06bd12559737876c0709ebf7eec4 _output/1-26/bin/windows/amd64/kube-proxy.exe +da9cc76d8c5c454ba524133cdee3424fbe1059242f88ece5030ac032c5444544 _output/1-26/bin/windows/amd64/kubeadm.exe +b02d2fabbbf7bbafcad9e2d2f8a4fd313358f89f7a57816ccc891e0fb7594a45 _output/1-26/bin/windows/amd64/kubectl.exe +8b2e729079075ac3cebb12ae057ec4f0350dc70876df2095e89348ed1220b7a6 _output/1-26/bin/windows/amd64/kubelet.exe diff --git a/projects/kubernetes/kubernetes/1-26/patches/0017-EKS-PATCH-fix-CVE-2023-47108.patch b/projects/kubernetes/kubernetes/1-26/patches/0017-EKS-PATCH-fix-CVE-2023-47108.patch new file mode 100644 index 0000000000..4460375dd0 --- /dev/null +++ b/projects/kubernetes/kubernetes/1-26/patches/0017-EKS-PATCH-fix-CVE-2023-47108.patch @@ -0,0 +1,131067 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: vela +Date: Mon, 2 Dec 2024 02:15:14 -0800 +Subject: [PATCH] --EKS-PATCH-- fix CVE-2023-47108 + +--- + .../google/s2a-go}/LICENSE | 5 +- + go.mod | 49 +- + go.sum | 363 +- + pkg/kubelet/cri/remote/remote_image.go | 1 + + pkg/kubelet/cri/remote/remote_runtime.go | 1 + + staging/src/k8s.io/api/go.mod | 7 +- + staging/src/k8s.io/api/go.sum | 29 +- + .../src/k8s.io/apiextensions-apiserver/go.mod | 41 +- + .../src/k8s.io/apiextensions-apiserver/go.sum | 347 +- + staging/src/k8s.io/apimachinery/go.mod | 14 +- + staging/src/k8s.io/apimachinery/go.sum | 33 +- + staging/src/k8s.io/apiserver/go.mod | 41 +- + staging/src/k8s.io/apiserver/go.sum | 347 +- + .../apiserver/pkg/endpoints/filters/traces.go | 13 +- + .../storage/storagebackend/factory/etcd3.go | 1 + + staging/src/k8s.io/cli-runtime/go.mod | 10 +- + staging/src/k8s.io/cli-runtime/go.sum | 34 +- + staging/src/k8s.io/client-go/go.mod | 10 +- + staging/src/k8s.io/client-go/go.sum | 34 +- + staging/src/k8s.io/cloud-provider/go.mod | 41 +- + staging/src/k8s.io/cloud-provider/go.sum | 355 +- + staging/src/k8s.io/cluster-bootstrap/go.mod | 4 +- + staging/src/k8s.io/cluster-bootstrap/go.sum | 28 +- + .../src/k8s.io/code-generator/examples/go.sum | 1407 +- + staging/src/k8s.io/code-generator/go.mod | 9 +- + staging/src/k8s.io/code-generator/go.sum | 26 +- + staging/src/k8s.io/component-base/go.mod | 36 +- + 
staging/src/k8s.io/component-base/go.sum | 250 +- + staging/src/k8s.io/component-helpers/go.mod | 6 +- + staging/src/k8s.io/component-helpers/go.sum | 31 +- + staging/src/k8s.io/controller-manager/go.mod | 41 +- + staging/src/k8s.io/controller-manager/go.sum | 355 +- + staging/src/k8s.io/cri-api/go.mod | 13 +- + staging/src/k8s.io/cri-api/go.sum | 51 +- + staging/src/k8s.io/csi-translation-lib/go.mod | 6 +- + staging/src/k8s.io/csi-translation-lib/go.sum | 30 +- + .../k8s.io/dynamic-resource-allocation/go.mod | 12 +- + .../k8s.io/dynamic-resource-allocation/go.sum | 51 +- + staging/src/k8s.io/kms/go.mod | 5 +- + staging/src/k8s.io/kms/go.sum | 28 +- + staging/src/k8s.io/kube-aggregator/go.mod | 41 +- + staging/src/k8s.io/kube-aggregator/go.sum | 355 +- + .../src/k8s.io/kube-controller-manager/go.mod | 3 +- + .../src/k8s.io/kube-controller-manager/go.sum | 58 +- + staging/src/k8s.io/kube-proxy/go.mod | 5 +- + staging/src/k8s.io/kube-proxy/go.sum | 55 +- + staging/src/k8s.io/kube-scheduler/go.mod | 4 +- + staging/src/k8s.io/kube-scheduler/go.sum | 54 +- + staging/src/k8s.io/kubectl/go.mod | 10 +- + staging/src/k8s.io/kubectl/go.sum | 62 +- + staging/src/k8s.io/kubelet/go.mod | 8 +- + staging/src/k8s.io/kubelet/go.sum | 69 +- + .../azure/azure_blobDiskController_test.go | 2 +- + .../src/k8s.io/legacy-cloud-providers/go.mod | 21 +- + .../src/k8s.io/legacy-cloud-providers/go.sum | 109 +- + staging/src/k8s.io/metrics/go.mod | 8 +- + staging/src/k8s.io/metrics/go.sum | 32 +- + staging/src/k8s.io/mount-utils/go.mod | 10 +- + staging/src/k8s.io/mount-utils/go.sum | 27 +- + .../src/k8s.io/pod-security-admission/go.mod | 41 +- + .../src/k8s.io/pod-security-admission/go.sum | 355 +- + staging/src/k8s.io/sample-apiserver/go.mod | 39 +- + staging/src/k8s.io/sample-apiserver/go.sum | 352 +- + staging/src/k8s.io/sample-cli-plugin/go.mod | 8 +- + staging/src/k8s.io/sample-cli-plugin/go.sum | 31 +- + staging/src/k8s.io/sample-controller/go.mod | 6 +- + staging/src/k8s.io/sample-controller/go.sum | 31 +- + .../go/compute/internal/version.go | 2 +- + .../cenkalti/backoff/v4/.travis.yml | 10 - + .../github.com/cenkalti/backoff/v4/retry.go | 50 +- + vendor/github.com/go-logr/logr/.golangci.yaml | 3 - + vendor/github.com/go-logr/logr/README.md | 113 +- + vendor/github.com/go-logr/logr/SECURITY.md | 18 + + vendor/github.com/go-logr/logr/discard.go | 32 +- + vendor/github.com/go-logr/logr/funcr/funcr.go | 75 +- + vendor/github.com/go-logr/logr/logr.go | 201 +- + .../google/go-cmp/cmp/cmpopts/equate.go | 49 +- + .../google/go-cmp/cmp/cmpopts/ignore.go | 16 +- + .../google/go-cmp/cmp/cmpopts/sort.go | 12 +- + .../google/go-cmp/cmp/cmpopts/xform.go | 4 +- + .../github.com/google/go-cmp/cmp/compare.go | 38 +- + .../cmp/{export_unsafe.go => export.go} | 5 - + .../google/go-cmp/cmp/export_panic.go | 16 - + .../value/{pointer_unsafe.go => pointer.go} | 3 - + .../cmp/internal/value/pointer_purego.go | 34 - + .../github.com/google/go-cmp/cmp/options.go | 84 +- + vendor/github.com/google/go-cmp/cmp/path.go | 46 +- + .../google/go-cmp/cmp/report_reflect.go | 2 +- + vendor/github.com/google/s2a-go/.gitignore | 6 + + .../google/s2a-go/CODE_OF_CONDUCT.md | 93 + + .../github.com/google/s2a-go/CONTRIBUTING.md | 29 + + .../google/s2a-go/LICENSE.md} | 1 + + vendor/github.com/google/s2a-go/README.md | 17 + + .../google/s2a-go/fallback/s2a_fallback.go | 167 + + .../s2a-go/internal/authinfo/authinfo.go | 119 + + .../s2a-go/internal/handshaker/handshaker.go | 438 + + .../internal/handshaker/service/service.go | 99 + + 
.../proto/common_go_proto/common.pb.go | 389 + + .../s2a_context_go_proto/s2a_context.pb.go | 267 + + .../internal/proto/s2a_go_proto/s2a.pb.go | 1377 + + .../proto/s2a_go_proto/s2a_grpc.pb.go | 173 + + .../proto/v2/common_go_proto/common.pb.go | 367 + + .../v2/s2a_context_go_proto/s2a_context.pb.go | 248 + + .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 2494 ++ + .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 159 + + .../internal/aeadcrypter/aeadcrypter.go | 34 + + .../record/internal/aeadcrypter/aesgcm.go | 70 + + .../record/internal/aeadcrypter/chachapoly.go | 67 + + .../record/internal/aeadcrypter/common.go | 92 + + .../record/internal/halfconn/ciphersuite.go | 98 + + .../record/internal/halfconn/counter.go | 60 + + .../record/internal/halfconn/expander.go | 59 + + .../record/internal/halfconn/halfconn.go | 193 + + .../google/s2a-go/internal/record/record.go | 757 + + .../s2a-go/internal/record/ticketsender.go | 176 + + .../internal/tokenmanager/tokenmanager.go | 70 + + .../google/s2a-go/internal/v2/README.md | 1 + + .../internal/v2/certverifier/certverifier.go | 122 + + .../testdata/client_intermediate_cert.der | Bin 0 -> 998 bytes + .../testdata/client_leaf_cert.der | Bin 0 -> 1147 bytes + .../testdata/client_root_cert.der | Bin 0 -> 1013 bytes + .../testdata/server_intermediate_cert.der | Bin 0 -> 998 bytes + .../testdata/server_leaf_cert.der | Bin 0 -> 1147 bytes + .../testdata/server_root_cert.der | Bin 0 -> 1013 bytes + .../internal/v2/remotesigner/remotesigner.go | 186 + + .../v2/remotesigner/testdata/client_cert.der | Bin 0 -> 1013 bytes + .../v2/remotesigner/testdata/client_cert.pem | 24 + + .../v2/remotesigner/testdata/client_key.pem | 27 + + .../v2/remotesigner/testdata/server_cert.der | Bin 0 -> 1013 bytes + .../v2/remotesigner/testdata/server_cert.pem | 24 + + .../v2/remotesigner/testdata/server_key.pem | 27 + + .../google/s2a-go/internal/v2/s2av2.go | 354 + + .../internal/v2/testdata/client_cert.pem | 24 + + .../internal/v2/testdata/client_key.pem | 27 + + .../internal/v2/testdata/server_cert.pem | 24 + + .../internal/v2/testdata/server_key.pem | 27 + + .../tlsconfigstore/testdata/client_cert.pem | 24 + + .../v2/tlsconfigstore/testdata/client_key.pem | 27 + + .../tlsconfigstore/testdata/server_cert.pem | 24 + + .../v2/tlsconfigstore/testdata/server_key.pem | 27 + + .../v2/tlsconfigstore/tlsconfigstore.go | 404 + + vendor/github.com/google/s2a-go/s2a.go | 412 + + .../github.com/google/s2a-go/s2a_options.go | 208 + + vendor/github.com/google/s2a-go/s2a_utils.go | 79 + + .../google/s2a-go/stream/s2a_stream.go | 34 + + .../google/s2a-go/testdata/client_cert.pem | 24 + + .../google/s2a-go/testdata/client_key.pem | 27 + + .../google/s2a-go/testdata/server_cert.pem | 24 + + .../google/s2a-go/testdata/server_key.pem | 27 + + vendor/github.com/google/uuid/.travis.yml | 9 - + vendor/github.com/google/uuid/CHANGELOG.md | 10 + + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + + vendor/github.com/google/uuid/README.md | 10 +- + vendor/github.com/google/uuid/node_js.go | 2 +- + vendor/github.com/google/uuid/uuid.go | 10 +- + .../gax-go/v2/.release-please-manifest.json | 2 +- + .../googleapis/gax-go/v2/CHANGES.md | 45 + + .../googleapis/gax-go/v2/apierror/apierror.go | 14 + + .../googleapis/gax-go/v2/call_option.go | 21 + + .../github.com/googleapis/gax-go/v2/header.go | 68 +- + .../googleapis/gax-go/v2/internal/version.go | 2 +- + .../github.com/googleapis/gax-go/v2/invoke.go | 10 + + .../grpc-gateway/v2/internal/httprule/fuzz.go | 4 +- + .../v2/internal/httprule/parse.go | 30 +- + 
.../grpc-gateway/v2/runtime/BUILD.bazel | 10 +- + .../grpc-gateway/v2/runtime/context.go | 72 +- + .../grpc-gateway/v2/runtime/convert.go | 46 +- + .../grpc-gateway/v2/runtime/errors.go | 17 +- + .../grpc-gateway/v2/runtime/fieldmask.go | 9 +- + .../grpc-gateway/v2/runtime/handler.go | 26 +- + .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 38 +- + .../grpc-gateway/v2/runtime/marshal_proto.go | 9 +- + .../grpc-gateway/v2/runtime/mux.go | 162 +- + .../grpc-gateway/v2/runtime/pattern.go | 2 - + .../grpc-gateway/v2/runtime/query.go | 69 +- + .../grpc-gateway/v2/utilities/BUILD.bazel | 6 +- + .../v2/utilities/readerfactory.go | 3 +- + .../v2/utilities/string_array_flag.go | 33 + + .../grpc-gateway/v2/utilities/trie.go | 2 +- + .../testify/assert/assertion_compare.go | 36 +- + .../testify/assert/assertion_format.go | 216 +- + .../testify/assert/assertion_forward.go | 432 +- + .../testify/assert/assertion_order.go | 24 +- + .../stretchr/testify/assert/assertions.go | 384 +- + .../github.com/stretchr/testify/assert/doc.go | 43 +- + .../testify/assert/http_assertions.go | 12 +- + .../github.com/stretchr/testify/mock/doc.go | 30 +- + .../github.com/stretchr/testify/mock/mock.go | 172 +- + .../stretchr/testify/require/doc.go | 23 +- + .../stretchr/testify/require/require.go | 444 +- + .../testify/require/require_forward.go | 432 +- + .../google.golang.org/grpc/otelgrpc/config.go | 229 + + .../google.golang.org/grpc/otelgrpc/doc.go | 22 + + .../grpc/otelgrpc/grpctrace.go | 163 - + .../grpc/otelgrpc/interceptor.go | 311 +- + .../grpc/otelgrpc/internal/parse.go | 26 +- + .../grpc/otelgrpc/metadata_supplier.go | 98 + + .../grpc/otelgrpc/semconv.go | 4 +- + .../grpc/otelgrpc/stats_handler.go | 235 + + .../grpc/otelgrpc/version.go | 6 +- + .../net/http/otelhttp/common.go | 2 +- + .../net/http/otelhttp/config.go | 14 +- + .../net/http/otelhttp/handler.go | 116 +- + .../http/otelhttp/internal/semconvutil/gen.go | 21 + + .../otelhttp/internal/semconvutil/httpconv.go | 552 + + .../otelhttp/internal/semconvutil/netconv.go | 368 + + .../net/http/otelhttp/transport.go | 10 +- + .../net/http/otelhttp/version.go | 6 +- + .../instrumentation/net/http/otelhttp/wrap.go | 14 +- + .../go.opentelemetry.io/otel/.codespellignore | 5 + + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + + vendor/go.opentelemetry.io/otel/.gitignore | 7 +- + vendor/go.opentelemetry.io/otel/.golangci.yml | 104 +- + vendor/go.opentelemetry.io/otel/.lycheeignore | 3 + + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 852 +- + vendor/go.opentelemetry.io/otel/CODEOWNERS | 4 +- + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 198 +- + vendor/go.opentelemetry.io/otel/Makefile | 142 +- + vendor/go.opentelemetry.io/otel/README.md | 54 +- + vendor/go.opentelemetry.io/otel/RELEASING.md | 35 +- + .../otel/attribute/filter.go | 60 + + .../go.opentelemetry.io/otel/attribute/set.go | 27 +- + .../otel/attribute/value.go | 95 +- + .../otel/baggage/baggage.go | 88 +- + .../go.opentelemetry.io/otel/codes/codes.go | 10 + + vendor/go.opentelemetry.io/otel/codes/doc.go | 2 +- + .../otel/exporters/otlp/otlptrace/README.md | 8 +- + .../otel/exporters/otlp/otlptrace/exporter.go | 7 +- + .../otlp/otlptrace/otlptracegrpc/client.go | 23 +- + .../internal/envconfig/envconfig.go | 94 +- + .../otlptrace/otlptracegrpc/internal/gen.go | 35 + + .../internal/otlpconfig/envconfig.go | 34 +- + .../internal/otlpconfig/options.go | 29 +- + .../internal/otlpconfig/optiontypes.go | 5 +- + .../internal/otlpconfig/tls.go | 5 +- + .../otlptracegrpc}/internal/partialsuccess.go | 39 +- 
+ .../otlptracegrpc}/internal/retry/retry.go | 40 +- + .../otlp/otlptrace/otlptracegrpc/options.go | 4 +- + .../otlp/otlptrace/version.go} | 12 +- + vendor/go.opentelemetry.io/otel/handler.go | 65 +- + .../otel/internal/attribute/attribute.go | 111 + + .../go.opentelemetry.io/otel/internal/gen.go | 29 + + .../otel/internal/global/handler.go | 102 + + .../otel/internal/global/instruments.go | 371 + + .../otel/internal/global/internal_logging.go | 44 +- + .../otel/internal/global/meter.go | 354 + + .../otel/internal/global/state.go | 45 +- + .../otel/internal/global/trace.go | 7 + + vendor/go.opentelemetry.io/otel/metric.go | 53 + + .../otel/metric/asyncfloat64.go | 271 + + .../otel/metric/asyncint64.go | 269 + + .../go.opentelemetry.io/otel/metric/config.go | 25 +- + vendor/go.opentelemetry.io/otel/metric/doc.go | 157 +- + .../otel/metric/embedded/embedded.go | 234 + + .../otel/metric/global/global.go | 42 - + .../otel/metric/instrument.go | 357 + + .../instrument/asyncfloat64/asyncfloat64.go | 70 - + .../instrument/asyncint64/asyncint64.go | 70 - + .../otel/metric/instrument/config.go | 69 - + .../instrument/syncfloat64/syncfloat64.go | 56 - + .../metric/instrument/syncint64/syncint64.go | 56 - + .../metric/internal/global/instruments.go | 360 - + .../otel/metric/internal/global/meter.go | 347 - + .../otel/metric/internal/global/state.go | 68 - + .../go.opentelemetry.io/otel/metric/meter.go | 204 +- + .../go.opentelemetry.io/otel/metric/noop.go | 181 - + .../otel/metric/syncfloat64.go | 185 + + .../otel/metric/syncint64.go | 185 + + .../otel/propagation/trace_context.go | 6 +- + .../go.opentelemetry.io/otel/requirements.txt | 1 + + .../otel/sdk/internal/env/env.go | 10 +- + .../otel/sdk/internal/gen.go | 29 + + .../otel/sdk/internal/internal.go | 11 +- + .../otel/sdk/resource/auto.go | 68 +- + .../otel/sdk/resource/builtin.go | 10 +- + .../otel/sdk/resource/config.go | 7 + + .../otel/sdk/resource/container.go | 4 +- + .../otel/sdk/resource/doc.go | 3 + + .../otel/sdk/resource/env.go | 31 +- + .../otel/sdk/resource/host_id.go | 120 + + .../resource/host_id_bsd.go} | 19 +- + .../otel/sdk/resource/host_id_darwin.go | 19 + + .../resource/host_id_exec.go} | 27 +- + .../otel/sdk/resource/host_id_linux.go | 22 + + .../otel/sdk/resource/host_id_readfile.go | 28 + + .../otel/sdk/resource/host_id_unsupported.go | 36 + + .../otel/sdk/resource/host_id_windows.go | 48 + + .../otel/sdk/resource/os.go | 13 +- + .../otel/sdk/resource/os_release_unix.go | 8 +- + .../otel/sdk/resource/process.go | 54 +- + .../otel/sdk/resource/resource.go | 27 +- + .../otel/sdk/trace/batch_span_processor.go | 70 +- + .../otel/sdk/trace/provider.go | 129 +- + .../otel/sdk/trace/sampling.go | 14 +- + .../otel/sdk/trace/simple_span_processor.go | 9 +- + .../otel/sdk/trace/span.go | 66 +- + .../otel/sdk/trace/span_exporter.go | 2 +- + .../otel/sdk/trace/span_processor.go | 7 +- + .../otel/sdk/trace/tracer.go | 5 +- + .../otel/sdk/trace/tracetest/span.go | 1 + + .../otel/sdk/trace/version.go | 20 + + .../go.opentelemetry.io/otel/sdk/version.go | 20 + + .../otel/semconv/internal/http.go | 8 +- + .../otel/semconv/v1.17.0/doc.go | 20 + + .../otel/semconv/v1.17.0/event.go | 199 + + .../unit.go => semconv/v1.17.0/exception.go} | 11 +- + .../otel/semconv/v1.17.0/http.go | 21 + + .../otel/semconv/v1.17.0/resource.go | 2010 ++ + .../otel/semconv/v1.17.0/schema.go | 20 + + .../otel/semconv/v1.17.0/trace.go | 3375 +++ + .../otel/semconv/v1.21.0/attribute_group.go | 1877 ++ + .../otel/semconv/v1.21.0/doc.go | 20 + + 
.../otel/semconv/v1.21.0/event.go | 199 + + .../otel/semconv/v1.21.0/exception.go | 20 + + .../otel/semconv/v1.21.0/resource.go | 2310 ++ + .../otel/semconv/v1.21.0/schema.go | 20 + + .../otel/semconv/v1.21.0/trace.go | 2495 ++ + .../go.opentelemetry.io/otel/trace/config.go | 18 + + vendor/go.opentelemetry.io/otel/trace/doc.go | 66 +- + .../otel/trace/embedded/embedded.go | 56 + + vendor/go.opentelemetry.io/otel/trace/noop.go | 14 +- + .../otel/trace/noop/noop.go | 118 + + .../go.opentelemetry.io/otel/trace/trace.go | 45 +- + .../otel/trace/tracestate.go | 38 +- + vendor/go.opentelemetry.io/otel/version.go | 2 +- + vendor/go.opentelemetry.io/otel/versions.yaml | 29 +- + .../collector/trace/v1/trace_service.pb.go | 2 +- + .../collector/trace/v1/trace_service.pb.gw.go | 26 +- + .../trace/v1/trace_service_grpc.pb.go | 2 +- + .../proto/otlp/common/v1/common.pb.go | 9 +- + .../proto/otlp/resource/v1/resource.pb.go | 2 +- + .../proto/otlp/trace/v1/trace.pb.go | 26 +- + .../chacha20poly1305/chacha20poly1305.go | 98 + + .../chacha20poly1305_amd64.go | 86 + + .../chacha20poly1305/chacha20poly1305_amd64.s | 2715 ++ + .../chacha20poly1305_generic.go | 81 + + .../chacha20poly1305_noasm.go | 15 + + .../chacha20poly1305/xchacha20poly1305.go | 86 + + vendor/golang.org/x/crypto/hkdf/hkdf.go | 95 + + .../x/oauth2/google/appengine_gen1.go | 1 - + .../x/oauth2/google/appengine_gen2_flex.go | 1 - + vendor/golang.org/x/oauth2/google/default.go | 9 +- + .../x/oauth2/internal/client_appengine.go | 1 - + vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- + vendor/golang.org/x/oauth2/internal/token.go | 60 +- + vendor/golang.org/x/oauth2/token.go | 19 +- + .../api/compute/v0.alpha/compute-api.json | 3303 ++- + .../api/compute/v0.alpha/compute-gen.go | 7612 +++++- + .../api/compute/v0.beta/compute-api.json | 5218 +++- + .../api/compute/v0.beta/compute-gen.go | 21084 ++++++++++++---- + .../api/compute/v1/compute-api.json | 1099 +- + .../api/compute/v1/compute-gen.go | 2832 ++- + .../api/container/v1/container-api.json | 317 +- + .../api/container/v1/container-gen.go | 732 +- + .../api/googleapi/googleapi.go | 5 +- + vendor/google.golang.org/api/internal/cba.go | 282 + + .../api/internal/cert/secureconnect_cert.go | 3 +- + .../google.golang.org/api/internal/creds.go | 5 +- + vendor/google.golang.org/api/internal/dca.go | 144 - + .../api/internal/gensupport/media.go | 7 +- + .../api/internal/gensupport/resumable.go | 9 +- + .../api/internal/gensupport/send.go | 5 + + .../api/internal/impersonate/impersonate.go | 3 +- + vendor/google.golang.org/api/internal/s2a.go | 136 + + .../api/internal/settings.go | 1 + + .../google.golang.org/api/internal/version.go | 2 +- + .../api/monitoring/v3/monitoring-api.json | 54 +- + .../api/monitoring/v3/monitoring-gen.go | 108 +- + .../option/internaloption/internaloption.go | 15 + + .../google.golang.org/api/tpu/v1/tpu-api.json | 6 +- + .../google.golang.org/api/tpu/v1/tpu-gen.go | 14 +- + .../api/transport/http/dial.go | 11 +- + .../internal/socket/socket_service.pb.go | 2822 +++ + .../internal/socket/socket_service.proto | 460 + + .../google.golang.org/appengine/socket/doc.go | 10 + + .../appengine/socket/socket_classic.go | 290 + + .../appengine/socket/socket_vm.go | 64 + + .../api/annotations/field_behavior.pb.go | 22 +- + .../genproto/googleapis/api/tidyfix.go | 23 + + .../genproto/internal/doc.go | 17 + + vendor/google.golang.org/grpc/README.md | 60 +- + .../grpc/attributes/attributes.go | 59 +- + .../grpc/balancer/balancer.go | 62 +- + .../grpc/balancer/base/balancer.go | 22 
+- + .../grpc/balancer_conn_wrappers.go | 75 +- + .../grpc_binarylog_v1/binarylog.pb.go | 2 +- + vendor/google.golang.org/grpc/call.go | 11 +- + vendor/google.golang.org/grpc/clientconn.go | 248 +- + vendor/google.golang.org/grpc/codec.go | 8 +- + vendor/google.golang.org/grpc/dialoptions.go | 42 +- + .../grpc/encoding/encoding.go | 17 +- + .../grpc/encoding/gzip/gzip.go | 4 +- + .../grpc/encoding/proto/proto.go | 4 +- + .../grpc/grpclog/component.go | 40 +- + .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- + .../google.golang.org/grpc/grpclog/logger.go | 30 +- + .../grpc/grpclog/loggerv2.go | 56 +- + .../google.golang.org/grpc/health/client.go | 2 +- + .../grpc/health/grpc_health_v1/health.pb.go | 2 +- + .../health/grpc_health_v1/health_grpc.pb.go | 22 +- + vendor/google.golang.org/grpc/interceptor.go | 12 +- + .../grpc/internal/backoff/backoff.go | 36 + + .../balancer/gracefulswitch/gracefulswitch.go | 59 +- + .../grpc/internal/balancerload/load.go | 4 +- + .../grpc/internal/binarylog/method_logger.go | 4 +- + .../grpc/internal/buffer/unbounded.go | 18 +- + .../grpc/internal/channelz/funcs.go | 69 +- + .../grpc/internal/channelz/logging.go | 12 +- + .../grpc/internal/channelz/types.go | 5 + + .../grpc/internal/channelz/util_linux.go | 2 +- + .../grpc/internal/channelz/util_nonlinux.go | 2 +- + .../grpc/internal/credentials/credentials.go | 8 +- + .../grpc/internal/envconfig/envconfig.go | 12 +- + .../grpc/internal/grpclog/grpclog.go | 40 +- + .../grpc/internal/grpclog/prefixLogger.go | 8 +- + .../grpc/internal/grpcrand/grpcrand.go | 7 + + .../internal/grpcsync/callback_serializer.go | 54 +- + .../grpc/internal/grpcsync/pubsub.go | 121 + + .../grpc/{ => internal/idle}/idle.go | 188 +- + .../grpc/internal/internal.go | 51 +- + .../grpc/internal/metadata/metadata.go | 2 +- + .../grpc/internal/pretty/pretty.go | 2 +- + .../grpc/internal/resolver/config_selector.go | 4 +- + .../internal/resolver/dns/dns_resolver.go | 74 +- + .../grpc/internal/status/status.go | 36 +- + .../grpc/internal/transport/controlbuf.go | 16 +- + .../grpc/internal/transport/handler_server.go | 13 +- + .../grpc/internal/transport/http2_client.go | 56 +- + .../grpc/internal/transport/http2_server.go | 22 +- + .../grpc/internal/transport/http_util.go | 77 +- + .../grpc/internal/transport/transport.go | 19 +- + .../google.golang.org/grpc/picker_wrapper.go | 34 +- + vendor/google.golang.org/grpc/pickfirst.go | 88 +- + vendor/google.golang.org/grpc/preloader.go | 2 +- + .../grpc/resolver/manual/manual.go | 44 +- + vendor/google.golang.org/grpc/resolver/map.go | 10 +- + .../grpc/resolver/resolver.go | 84 +- + .../grpc/resolver_conn_wrapper.go | 10 +- + vendor/google.golang.org/grpc/rpc_util.go | 44 +- + vendor/google.golang.org/grpc/server.go | 231 +- + .../grpc/shared_buffer_pool.go | 154 + + vendor/google.golang.org/grpc/stats/stats.go | 14 +- + .../google.golang.org/grpc/status/status.go | 14 +- + vendor/google.golang.org/grpc/stream.go | 130 +- + vendor/google.golang.org/grpc/tap/tap.go | 6 + + vendor/google.golang.org/grpc/trace.go | 6 +- + vendor/google.golang.org/grpc/version.go | 2 +- + vendor/google.golang.org/grpc/vet.sh | 10 +- + vendor/modules.txt | 132 +- + 449 files changed, 79162 insertions(+), 16389 deletions(-) + rename LICENSES/vendor/{go.opentelemetry.io/otel/exporters/otlp/internal/retry => github.com/google/s2a-go}/LICENSE (98%) + delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.travis.yml + create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md + rename 
vendor/github.com/google/go-cmp/cmp/{export_unsafe.go => export.go} (94%) + delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go + rename vendor/github.com/google/go-cmp/cmp/internal/value/{pointer_unsafe.go => pointer.go} (95%) + delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go + create mode 100644 vendor/github.com/google/s2a-go/.gitignore + create mode 100644 vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md + create mode 100644 vendor/github.com/google/s2a-go/CONTRIBUTING.md + rename vendor/{go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE => github.com/google/s2a-go/LICENSE.md} (99%) + create mode 100644 vendor/github.com/google/s2a-go/README.md + create mode 100644 vendor/github.com/google/s2a-go/fallback/s2a_fallback.go + create mode 100644 vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go + create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go + create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/service/service.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/record.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/ticketsender.go + create mode 100644 vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/README.md + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der + create 
mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/s2av2.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go + create mode 100644 vendor/github.com/google/s2a-go/s2a.go + create mode 100644 vendor/github.com/google/s2a-go/s2a_options.go + create mode 100644 vendor/github.com/google/s2a-go/s2a_utils.go + create mode 100644 vendor/github.com/google/s2a-go/stream/s2a_stream.go + create mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem + delete mode 100644 vendor/github.com/google/uuid/.travis.yml + create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md + create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go + delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go + create mode 100644 
vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go + create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore + create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc + create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/envconfig/envconfig.go (57%) + create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/envconfig.go (74%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/options.go (89%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/optiontypes.go (90%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/tls.go (87%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/partialsuccess.go (64%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/retry/retry.go (80%) + rename vendor/go.opentelemetry.io/otel/{metric/unit/doc.go => exporters/otlp/otlptrace/version.go} (65%) + create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/global/global.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/config.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/state.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/noop.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go + create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/gen.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go + rename vendor/go.opentelemetry.io/otel/{metric/instrument/instrument.go => sdk/resource/host_id_bsd.go} (54%) + create mode 100644 
vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go + rename vendor/go.opentelemetry.io/otel/{exporters/otlp/internal/config.go => sdk/resource/host_id_exec.go} (50%) + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go + rename vendor/go.opentelemetry.io/otel/{metric/unit/unit.go => semconv/v1.17.0/exception.go} (70%) + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go + create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go + create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go + create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go + create mode 100644 vendor/google.golang.org/api/internal/cba.go + delete mode 100644 vendor/google.golang.org/api/internal/dca.go + create mode 100644 vendor/google.golang.org/api/internal/s2a.go + create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go + create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto + create mode 100644 vendor/google.golang.org/appengine/socket/doc.go + create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go + create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go + create mode 100644 vendor/google.golang.org/genproto/googleapis/api/tidyfix.go + create mode 100644 vendor/google.golang.org/genproto/internal/doc.go + create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go + rename vendor/google.golang.org/grpc/{ => internal/idle}/idle.go (61%) + create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go + +diff --git 
a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/LICENSES/vendor/github.com/google/s2a-go/LICENSE +similarity index 98% +rename from LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE +rename to LICENSES/vendor/github.com/google/s2a-go/LICENSE +index bdbfa6963be..5f39be4994d 100644 +--- a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE ++++ b/LICENSES/vendor/github.com/google/s2a-go/LICENSE +@@ -1,4 +1,5 @@ +-= vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry licensed under: = ++= vendor/github.com/google/s2a-go licensed under: = ++ + + Apache License + Version 2.0, January 2004 +@@ -202,4 +203,4 @@ + See the License for the specific language governing permissions and + limitations under the License. + +-= vendor/go.opentelemetry.io/otel/LICENSE 86d3f3a95c324c9479bd8986968f4327 ++= vendor/github.com/google/s2a-go/LICENSE.md 3b83ef96387f14655fc854ddc3c6bd57 +diff --git a/go.mod b/go.mod +index 12b0421cf54..e61a6fb7a6f 100644 +--- a/go.mod ++++ b/go.mod +@@ -34,7 +34,7 @@ require ( + github.com/emicklei/go-restful/v3 v3.9.0 + github.com/evanphx/json-patch v4.12.0+incompatible + github.com/fsnotify/fsnotify v1.6.0 +- github.com/go-logr/logr v1.2.3 ++ github.com/go-logr/logr v1.3.0 + github.com/godbus/dbus/v5 v5.0.6 + github.com/gogo/protobuf v1.3.2 + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +@@ -42,9 +42,9 @@ require ( + github.com/golang/protobuf v1.5.4 + github.com/google/cadvisor v0.46.1 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5 + github.com/libopenstorage/openstorage v1.0.0 + github.com/lithammer/dedent v1.1.0 +@@ -63,31 +63,31 @@ require ( + github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + github.com/vishvananda/netlink v1.1.0 + github.com/vmware/govmomi v0.30.6 + go.etcd.io/etcd/api/v3 v3.5.5 + go.etcd.io/etcd/client/pkg/v3 v3.5.5 + go.etcd.io/etcd/client/v3 v3.5.5 + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 +- go.opentelemetry.io/proto/otlp v0.19.0 ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 ++ go.opentelemetry.io/proto/otlp v1.0.0 + go.uber.org/zap v1.19.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 +- golang.org/x/oauth2 v0.7.0 ++ golang.org/x/oauth2 v0.11.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 + golang.org/x/time v0.3.0 + golang.org/x/tools v0.12.0 +- google.golang.org/api v0.114.0 +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/api v0.126.0 ++ google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/gcfg.v1 v1.2.3 + gopkg.in/square/go-jose.v2 v2.2.2 +@@ -130,7 +130,7 @@ require ( + ) + + require ( +- cloud.google.com/go/compute v1.19.1 // indirect ++ cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect +@@ -144,7 +144,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect +@@ -175,15 +175,16 @@ require ( + github.com/google/btree v1.0.1 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect ++ github.com/google/s2a-go v0.1.4 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect +- github.com/googleapis/gax-go/v2 v2.7.1 // indirect ++ github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gorilla/websocket v1.4.2 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect +@@ -211,7 +212,6 @@ require ( + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect +- github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect +@@ -230,18 +230,17 @@ require ( + go.etcd.io/etcd/raft/v3 v3.5.5 // indirect + go.etcd.io/etcd/server/v3 v3.5.5 // indirect + go.opencensus.io v0.24.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- 
google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/warnings.v0 v0.1.1 // indirect +diff --git a/go.sum b/go.sum +index cb8a55f84dd..a45da3021ab 100644 +--- a/go.sum ++++ b/go.sum +@@ -28,146 +28,143 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD + cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= + cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= + cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod 
h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod 
h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod 
h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= + cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences 
v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod 
h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler 
v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod 
h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/azure-sdk-for-go v55.0.0+incompatible h1:L4/vUGbg1Xkw5L20LZD+hJI5I+ibWSytqQ68lTCfLwY= + github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +@@ -246,8 +243,8 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= + github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +@@ -367,10 +364,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= + github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +@@ -412,8 +409,8 @@ 
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -445,9 +442,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw + github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= + github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -512,8 +508,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -537,20 +533,22 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= ++github.com/google/s2a-go v0.1.4 
h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= ++github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= + github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= + github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= + github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +-github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= ++github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= + github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -565,8 +563,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf + github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= + github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= + github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +@@ -629,8 +627,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty 
v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -699,7 +697,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= + github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +@@ -774,8 +771,8 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG + github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= + github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +@@ -829,8 +826,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= 
+ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +@@ -888,43 +886,41 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58= + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI= + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= + go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= + go.opentelemetry.io/contrib/propagators/b3 v1.10.0/go.mod h1:oxvamQ/mTDFQVugml/uFS59+aEUnFLhmd1wsG+n5MOE= + go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= 
+-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= + go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= + go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= + go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +@@ -941,6 +937,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= + golang.org/x/crypto 
v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= ++golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= + golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +@@ -979,6 +976,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= + golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= + golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +@@ -1028,6 +1026,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +@@ -1046,10 +1045,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ + golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -1062,6 +1060,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + 
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= + golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -1140,6 +1139,7 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= + golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +@@ -1156,6 +1156,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= ++golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= + golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +@@ -1224,6 +1225,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= + golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= + golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +@@ -1260,8 +1262,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv + google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= + google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= + google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +-google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= ++google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= ++google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= + google.golang.org/appengine 
v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +@@ -1330,13 +1332,13 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc + google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= + google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= + google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -1364,10 +1366,10 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD + google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= ++google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= + google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= 
++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +@@ -1389,8 +1391,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= + gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +diff --git a/pkg/kubelet/cri/remote/remote_image.go b/pkg/kubelet/cri/remote/remote_image.go +index ee005d99038..05af66f78ee 100644 +--- a/pkg/kubelet/cri/remote/remote_image.go ++++ b/pkg/kubelet/cri/remote/remote_image.go +@@ -62,6 +62,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletTracing) { + tracingOpts := []otelgrpc.Option{ ++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), + otelgrpc.WithPropagators(tracing.Propagators()), + otelgrpc.WithTracerProvider(tp), + } +diff --git a/pkg/kubelet/cri/remote/remote_runtime.go b/pkg/kubelet/cri/remote/remote_runtime.go +index cd7b02efb74..e061b9ddf50 100644 +--- a/pkg/kubelet/cri/remote/remote_runtime.go ++++ b/pkg/kubelet/cri/remote/remote_runtime.go +@@ -84,6 +84,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletTracing) { + tracingOpts := []otelgrpc.Option{ ++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), + otelgrpc.WithPropagators(tracing.Propagators()), + otelgrpc.WithTracerProvider(tp), + } +diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod +index 7ad5786a060..eb43b852a6e 100644 +--- a/staging/src/k8s.io/api/go.mod ++++ b/staging/src/k8s.io/api/go.mod +@@ -6,17 +6,18 @@ go 1.19 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/apimachinery v0.0.0 + ) + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- 
github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect ++ github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum +index 227ae432b08..bc535ca807a 100644 +--- a/staging/src/k8s.io/api/go.sum ++++ b/staging/src/k8s.io/api/go.sum +@@ -1,27 +1,30 @@ + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= ++github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +@@ -31,23 +34,20 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ + github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -87,14 +87,13 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod +index a943ca7e40c..24e436deda7 100644 +--- a/staging/src/k8s.io/apiextensions-apiserver/go.mod ++++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod +@@ -9,18 +9,18 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/google/cel-go v0.12.7 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + go.etcd.io/etcd/client/pkg/v3 v3.5.5 + go.etcd.io/etcd/client/v3 v3.5.5 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a +- google.golang.org/grpc v1.56.3 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.0.0 +@@ -43,7 +43,7 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect +@@ -53,7 +53,7 @@ require ( + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -65,7 +65,7 @@ require ( + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect +@@ -94,21 +94,20 @@ require ( + 
go.etcd.io/etcd/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/raft/v3 v3.5.5 // indirect + go.etcd.io/etcd/server/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect +@@ -116,8 +115,8 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.12.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum +index 876a2959a15..6e63b66fc45 100644 +--- a/staging/src/k8s.io/apiextensions-apiserver/go.sum ++++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum +@@ -13,145 +13,144 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod 
h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod 
h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod 
h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= + cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= 
+-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod 
h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= 
+-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod 
h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -187,8 +186,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= + github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +@@ -205,12 +204,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +@@ -250,10 +246,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +@@ -281,8 +277,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -302,9 +298,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -355,8 +350,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -371,8 +366,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid 
v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +@@ -387,8 +382,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf + github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= + github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= + github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +@@ -440,6 +435,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -479,7 +476,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +@@ -533,6 +529,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T + github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +@@ -564,7 +562,6 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -572,10 +569,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +@@ -611,39 +606,37 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 
h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= + go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= + go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= + go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod 
h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= + go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +@@ -741,10 +734,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -941,13 +933,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto 
v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -966,9 +957,8 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -988,8 +978,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod +index 059b9cae6da..9d402bb5d65 100644 +--- a/staging/src/k8s.io/apimachinery/go.mod ++++ b/staging/src/k8s.io/apimachinery/go.mod +@@ -12,13 +12,13 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.4 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/moby/spdystream v0.2.0 + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.23.0 + gopkg.in/inf.v0 v0.9.1 + k8s.io/klog/v2 v2.80.1 +@@ -30,19 +30,19 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +- github.com/kr/text v0.2.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect +- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/onsi/ginkgo/v2 v2.4.0 // indirect + github.com/onsi/gomega v1.23.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect +- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect ++ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + ) +diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum +index 16c345e240c..c7dc1d239cd 100644 +--- a/staging/src/k8s.io/apimachinery/go.sum ++++ b/staging/src/k8s.io/apimachinery/go.sum +@@ -21,8 +21,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= + github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +@@ -50,20 +50,23 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + 
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -80,29 +83,28 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -179,15 +181,14 @@ google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGm + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod +index 385e10cc879..584ebe7a859 100644 +--- a/staging/src/k8s.io/apiserver/go.mod ++++ b/staging/src/k8s.io/apiserver/go.mod +@@ -14,31 +14,31 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/google/cel-go v0.12.7 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + go.etcd.io/etcd/api/v3 v3.5.5 + go.etcd.io/etcd/client/pkg/v3 v3.5.5 + go.etcd.io/etcd/client/v3 v3.5.5 + go.etcd.io/etcd/server/v3 v3.5.5 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 + go.uber.org/zap v1.19.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.18.0 + golang.org/x/time v0.3.0 +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a +- google.golang.org/grpc v1.56.3 ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/square/go-jose.v2 v2.2.2 +@@ -61,13 +61,13 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -78,7 +78,7 @@ require ( + github.com/gorilla/websocket v1.4.2 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect +@@ -105,18 +105,17 @@ require ( + go.etcd.io/etcd/client/v2 v2.305.5 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/raft/v3 v3.5.5 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum +index e0ef5691209..08b7fd87098 100644 +--- a/staging/src/k8s.io/apiserver/go.sum ++++ b/staging/src/k8s.io/apiserver/go.sum +@@ -13,145 +13,144 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= 
+-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod 
h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod 
h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= + cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= 
+-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= 
++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod 
h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod 
h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -186,8 +185,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= + github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +@@ -204,12 +203,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go 
v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +@@ -250,10 +246,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +@@ -280,8 +276,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -301,9 +297,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -354,8 +349,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -370,8 +365,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +@@ -386,8 +381,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf + github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= + 
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= + github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= + github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +@@ -439,6 +434,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -476,7 +473,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +@@ -531,6 +527,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T + github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +@@ -562,7 +560,6 @@ github.com/stoewer/go-strcase v1.2.0 
h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -570,10 +567,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +@@ -608,39 +603,37 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= + go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +-go.opentelemetry.io/otel v1.10.0 
h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= + go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= + go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= + go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod 
h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +@@ -737,10 +730,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -935,13 +927,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod 
h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -960,9 +951,8 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -982,8 +972,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go +index 67a1790c56a..fe6c1598600 100644 +--- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go 
++++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go +@@ -13,13 +13,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ +- + package filters + + import ( + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" + + tracing "k8s.io/component-base/tracing" +@@ -32,7 +32,16 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { + otelhttp.WithPublicEndpoint(), + otelhttp.WithTracerProvider(tp), + } ++ wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ++ // Add the http.target attribute to the otelhttp span ++ // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 ++ if r.URL != nil { ++ trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) ++ } ++ handler.ServeHTTP(w, r) ++ }) + // With Noop TracerProvider, the otelhttp still handles context propagation. + // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough +- return otelhttp.NewHandler(handler, "KubernetesAPI", opts...) ++ return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...) + } ++ +diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +index 64bcabadb97..185124c6bfd 100644 +--- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go ++++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +@@ -299,6 +299,7 @@ var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, e + } + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { + tracingOpts := []otelgrpc.Option{ ++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), + otelgrpc.WithPropagators(tracing.Propagators()), + otelgrpc.WithTracerProvider(c.TracerProvider), + } +diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod +index 52030094c1d..1a9179b7c78 100644 +--- a/staging/src/k8s.io/cli-runtime/go.mod ++++ b/staging/src/k8s.io/cli-runtime/go.mod +@@ -8,11 +8,11 @@ require ( + github.com/davecgh/go-spew v1.1.1 + github.com/evanphx/json-patch v4.12.0+incompatible + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/text v0.14.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.0.0 +@@ -28,14 +28,14 @@ require ( + require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/go-errors/errors v1.0.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // 
indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect +@@ -54,7 +54,7 @@ require ( + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/time v0.3.0 // indirect +diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum +index 670fb570d6b..76d35cf4de4 100644 +--- a/staging/src/k8s.io/cli-runtime/go.sum ++++ b/staging/src/k8s.io/cli-runtime/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -27,8 +28,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= + github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -64,15 +65,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + 
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +@@ -87,6 +88,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -109,7 +112,6 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +@@ -120,6 +122,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= + github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +@@ -129,17 +133,14 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -169,8 +170,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -231,8 +232,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod +index 9d1bdd033d4..c364fed02cf 100644 +--- a/staging/src/k8s.io/client-go/go.mod ++++ b/staging/src/k8s.io/client-go/go.mod +@@ -11,16 +11,16 @@ require ( + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/protobuf v1.5.4 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 + github.com/imdario/mergo v0.3.6 + github.com/peterbourgon/diskv v2.0.1+incompatible + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.23.0 +- golang.org/x/oauth2 v0.7.0 ++ golang.org/x/oauth2 v0.11.0 + golang.org/x/term v0.18.0 + golang.org/x/time v0.3.0 + google.golang.org/protobuf v1.33.0 +@@ -35,7 +35,7 @@ require ( + + require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect +diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum +index d03a4cd77eb..bc22e7110b6 100644 +--- a/staging/src/k8s.io/client-go/go.sum ++++ b/staging/src/k8s.io/client-go/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -23,8 +24,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -61,13 +62,13 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +@@ -81,6 +82,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -100,7 +103,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -113,19 +115,18 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -151,8 +152,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -212,8 +213,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod +index c55b59ef1b3..e26fb42ce8b 100644 +--- a/staging/src/k8s.io/cloud-provider/go.mod ++++ b/staging/src/k8s.io/cloud-provider/go.mod +@@ -5,10 +5,10 @@ module k8s.io/cloud-provider + go 1.19 + + require ( +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -26,7 +26,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect +@@ -35,7 +35,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -46,9 +46,9 @@ require ( + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -69,32 +69,31 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ 
go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum +index b6ccac78637..736c8cc0cea 100644 +--- a/staging/src/k8s.io/cloud-provider/go.sum ++++ b/staging/src/k8s.io/cloud-provider/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= 
+-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute 
v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= 
++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= 
++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod 
h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod 
h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +@@ -159,7 +158,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -180,11 +178,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 
h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -195,12 +192,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -226,12 +219,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 
h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -254,8 +246,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -273,9 +265,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -323,9 +314,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 
h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -340,8 +330,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -353,8 +343,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -383,6 +373,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -410,7 +402,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 
+-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -453,6 +444,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -461,7 +454,6 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE + github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +@@ -471,7 +463,6 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -479,10 +470,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -513,32 +502,30 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +@@ -626,10 +613,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -814,13 +800,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -836,11 +821,9 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -853,15 +836,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod +index 4de80942801..b6d4cdbc5e0 100644 +--- a/staging/src/k8s.io/cluster-bootstrap/go.mod ++++ b/staging/src/k8s.io/cluster-bootstrap/go.mod +@@ -5,7 +5,7 @@ module k8s.io/cluster-bootstrap + go 1.19 + + require ( +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + gopkg.in/square/go-jose.v2 v2.2.2 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -14,7 +14,7 @@ require ( + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum +index 3ec88eb1f95..d649cce1f8d 100644 +--- a/staging/src/k8s.io/cluster-bootstrap/go.sum ++++ b/staging/src/k8s.io/cluster-bootstrap/go.sum +@@ -5,22 +5,24 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +@@ -30,23 +32,20 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ + github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -86,8 +85,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= +@@ -95,7 +94,6 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum +index be31bede654..d33e674a206 100644 +--- a/staging/src/k8s.io/code-generator/examples/go.sum ++++ b/staging/src/k8s.io/code-generator/examples/go.sum +@@ -1,36 +1,693 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= ++cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= ++cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= ++cloud.google.com/go 
v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= ++cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= ++cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= ++cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= ++cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= ++cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= ++cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= ++cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= ++cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= ++cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= ++cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= ++cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= ++cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= ++cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= ++cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= ++cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= ++cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= ++cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= ++cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= ++cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= ++cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= ++cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= ++cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= ++cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= ++cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= ++cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= ++cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= ++cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= ++cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= ++cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= ++cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= ++cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= ++cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= ++cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= ++cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= ++cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= ++cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= ++cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= ++cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= ++cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= ++cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= 
++cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= ++cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= ++cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= ++cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= ++cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= ++cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= ++cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= ++cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= ++cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= ++cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= ++cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= ++cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= ++cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= ++cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= ++cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= ++cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= ++cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= ++cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= ++cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= ++cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= ++cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= ++cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= ++cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= ++cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= ++cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= ++cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= ++cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= ++cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= ++cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= ++cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= ++cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= ++cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= ++cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= ++cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= ++cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= ++cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= ++cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= 
++cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= ++cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= ++cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= ++cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= ++cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= ++cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= ++cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= ++cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= ++cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= ++cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= ++cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= ++cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= ++cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= ++cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= ++cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= ++cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= ++cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= ++cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= ++cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= ++cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= ++cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= ++cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= ++cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= ++cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= ++cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= ++cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= ++cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= ++cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= ++cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= ++cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= ++cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= ++cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= ++cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= ++cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= ++cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= ++cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= 
++cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= ++cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= ++cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= ++cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= ++cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= ++cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= ++cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= ++cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= ++cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= ++cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= ++cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= ++cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= ++cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= ++cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= ++cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= ++cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= ++cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= ++cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= ++cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= ++cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= ++cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= ++cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= ++cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= ++cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= ++cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= ++cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= ++cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= ++cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= ++cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= ++cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= ++cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= ++cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= ++cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= ++cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= ++cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= ++cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= ++cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= 
++cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= ++cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= ++cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= ++cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= ++cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= ++cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= ++cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= ++cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= ++cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= ++cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= ++cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= ++cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= ++cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= ++cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= ++cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= ++cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= ++cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= ++cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= ++cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= ++cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= + cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= ++cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= ++cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= ++cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= ++cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= ++cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= ++cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= ++cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= ++cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= ++cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= ++cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= ++cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= ++cloud.google.com/go/containeranalysis v0.9.0/go.mod 
h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= ++cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= ++cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= ++cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= ++cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= ++cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= ++cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= ++cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= ++cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= ++cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= ++cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= ++cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= ++cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= ++cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= ++cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= ++cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= ++cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= ++cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= ++cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= ++cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= ++cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= ++cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= ++cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= ++cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= ++cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= ++cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= ++cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= ++cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= ++cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= ++cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= ++cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= ++cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= ++cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= ++cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= ++cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= ++cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= ++cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= 
++cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= ++cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= ++cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= ++cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= ++cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= ++cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= ++cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= ++cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= ++cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= ++cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= ++cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= ++cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= ++cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= ++cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= ++cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= ++cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= ++cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= ++cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= ++cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= ++cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= ++cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= ++cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= ++cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= ++cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= ++cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= ++cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= ++cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= ++cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= ++cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= ++cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= ++cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= ++cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= ++cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= ++cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= ++cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= ++cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= ++cloud.google.com/go/eventarc 
v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= ++cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= ++cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= ++cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= ++cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= ++cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= ++cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= ++cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= ++cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= ++cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= ++cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= ++cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= ++cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= ++cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= ++cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= ++cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= ++cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= ++cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= ++cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= ++cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= ++cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= ++cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= ++cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= ++cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= ++cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= ++cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= ++cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= ++cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= ++cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= ++cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= ++cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= ++cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= ++cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= ++cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= ++cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= ++cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= ++cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= ++cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= 
++cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= ++cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= ++cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= ++cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= ++cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= ++cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= ++cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= ++cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= ++cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= ++cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= ++cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= ++cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= ++cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= ++cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= ++cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= ++cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= ++cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= ++cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= ++cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= ++cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= ++cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= ++cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= ++cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= ++cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= ++cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= ++cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= ++cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= ++cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= ++cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= ++cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= ++cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= ++cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= ++cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= ++cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= ++cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= ++cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= ++cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= ++cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= ++cloud.google.com/go/longrunning v0.3.0/go.mod 
h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= ++cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= ++cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= ++cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= ++cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= ++cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= ++cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= ++cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= ++cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= ++cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= ++cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= ++cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= ++cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= ++cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= ++cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= ++cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= ++cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= ++cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= ++cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= ++cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= ++cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= ++cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= ++cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= ++cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= ++cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= ++cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= ++cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= ++cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= ++cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= ++cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= ++cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= ++cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= ++cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= ++cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= ++cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= ++cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= ++cloud.google.com/go/networksecurity v0.7.0/go.mod 
h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= ++cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= ++cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= ++cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= ++cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= ++cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= ++cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= ++cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= ++cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= ++cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= ++cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= ++cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= ++cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= ++cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= ++cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= ++cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= ++cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= ++cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= ++cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= ++cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= ++cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= ++cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= ++cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= ++cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= ++cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= ++cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= ++cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= ++cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= ++cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= ++cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= ++cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= ++cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= ++cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= ++cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= ++cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= ++cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= ++cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= 
++cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= ++cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= ++cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= ++cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= ++cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= ++cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= ++cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= ++cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= ++cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= ++cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= ++cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= ++cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= ++cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= ++cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= ++cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= ++cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= ++cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= ++cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= ++cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= ++cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= ++cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= ++cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= ++cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= ++cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= ++cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= ++cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= ++cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= ++cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= ++cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= ++cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= ++cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= ++cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= ++cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= ++cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= ++cloud.google.com/go/resourcemanager 
v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= ++cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= ++cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= ++cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= ++cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= ++cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= ++cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= ++cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= ++cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= ++cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= ++cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= ++cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= ++cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= ++cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= ++cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= ++cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= ++cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= ++cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= ++cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= ++cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= ++cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= ++cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= ++cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= ++cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= ++cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= ++cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= ++cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= ++cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= ++cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= ++cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= ++cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= ++cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= ++cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= ++cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= ++cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= ++cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= ++cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= ++cloud.google.com/go/servicecontrol v1.4.0/go.mod 
h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= ++cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= ++cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= ++cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= ++cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= ++cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= ++cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= ++cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= ++cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= ++cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= ++cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= ++cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= ++cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= ++cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= ++cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= ++cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= ++cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= ++cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= ++cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= ++cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= ++cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= ++cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= ++cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= ++cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= ++cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= ++cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= ++cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= ++cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= ++cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= ++cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= ++cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= ++cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= ++cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= ++cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= ++cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= ++cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= ++cloud.google.com/go/storage v1.22.1/go.mod 
h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= ++cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= ++cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= ++cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= ++cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= ++cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= ++cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= ++cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= ++cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= ++cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= ++cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= ++cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= ++cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= ++cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= ++cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= ++cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= ++cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= ++cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= ++cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= ++cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= ++cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= ++cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= ++cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= ++cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= ++cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= ++cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= ++cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= ++cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= ++cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= ++cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= ++cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= ++cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= ++cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= ++cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= ++cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= ++cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= ++cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= ++cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= ++cloud.google.com/go/videointelligence 
v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= ++cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= ++cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= ++cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= ++cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= ++cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= ++cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= ++cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= ++cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= ++cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= ++cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= ++cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= ++cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= ++cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= ++cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= ++cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= ++cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= ++cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= ++cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= ++cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= ++cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= ++cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= ++cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= ++cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= ++cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= ++cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= ++cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= ++cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= ++cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= ++cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= ++cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= ++cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= ++gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= ++git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= ++github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 
++github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= ++github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= ++github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= ++github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= ++github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= ++github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= ++github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= ++github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= ++github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= ++github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= ++github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= ++github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= ++github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= ++github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= ++github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= ++github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= ++github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= ++github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= ++github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= ++github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= ++github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= ++github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= ++github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= 
++github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= ++github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= + github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= + github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= ++github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= ++github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= ++github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= ++github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= ++github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= ++github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= ++github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= ++github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= ++github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= ++github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= ++github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= ++github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= 
++github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= ++github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= ++github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= + github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= + github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= ++github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= ++github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= ++github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= ++github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= ++github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= ++github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= ++github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= ++github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= ++github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= ++github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= ++github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= + github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= + github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -40,15 +697,35 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym + github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= + github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= + github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= ++github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= ++github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= ++github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= + github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= ++github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= ++github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= ++github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= ++github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= ++github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= ++github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= ++github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= ++github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= ++github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= ++github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= ++github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= ++github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= ++github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= + github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= + github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= + github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= ++github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= ++github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= ++github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= + github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= + github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= + github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +@@ -56,49 +733,140 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W + github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= + github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= + github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= ++github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= + github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= ++github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= + github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= ++github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= + github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= ++github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= ++github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= ++github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= ++github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= + github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= ++github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= + github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= + github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= + github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= ++github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= + github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= + github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= ++github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= ++github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= ++github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= ++github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= ++github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= ++github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= ++github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= ++github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= ++github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= ++github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= ++github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= ++github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= ++github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= ++github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= ++github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= ++github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= ++github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= + github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= ++github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= ++github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= ++github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= ++github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= ++github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= ++github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= ++github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= ++github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= ++github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= ++github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= ++github.com/googleapis/gax-go/v2 
v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= ++github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= ++github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= ++github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= ++github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= ++github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= ++github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= ++github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= ++github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= ++github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= ++github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= ++github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= ++github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= + github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= ++github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= ++github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= ++github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= ++github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= ++github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= ++github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= ++github.com/klauspost/cpuid/v2 
v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= ++github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= ++github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= ++github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= ++github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= + github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= + github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= + github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= + github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= ++github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= ++github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= ++github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= ++github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= ++github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= + github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= + github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +@@ -110,7 +878,6 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= + github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +@@ -134,159 +901,733 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ + github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= + 
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= ++github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= ++github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= ++github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= ++github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= ++github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= ++github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= ++github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= ++github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= ++github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= ++github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= ++github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= ++github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= ++github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= ++github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= ++github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= ++github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
++github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= ++github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= ++github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= ++github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= ++github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= ++github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= + github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= ++github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= ++github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= ++go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= ++go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= ++go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= ++go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= ++go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= ++go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= ++go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= ++go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= ++go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= ++go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= ++golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= ++golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= ++golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= ++golang.org/x/crypto 
v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= ++golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= ++golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= ++golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= ++golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= ++golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= + golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= + golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= ++golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= ++golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= + golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= ++golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= ++golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= ++golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= ++golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= ++golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= ++golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= ++golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= ++golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= ++golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= ++golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= ++golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= ++golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= ++golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= ++golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= ++golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= ++golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= ++golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= ++golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= ++golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= + golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= + golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= ++golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= + golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= ++golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= ++golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= ++golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= ++golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= ++golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= ++golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= ++golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= ++golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= ++golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= ++golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= + golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= ++golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= + golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= ++golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= ++golang.org/x/mod v0.9.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= ++golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= ++golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= ++golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= + golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= ++golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= ++golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= ++golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= + golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= ++golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= ++golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= ++golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= ++golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= ++golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= ++golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= ++golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= ++golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= ++golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= + golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= ++golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= ++golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= ++golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= ++golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= ++golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= ++golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= ++golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= ++golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= + golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= ++golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= ++golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= ++golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= + golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= ++golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= ++golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= ++golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= + golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= ++golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= ++golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= + golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= + golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= ++golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= + golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + 
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= ++golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= ++golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= ++golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= ++golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= ++golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= ++golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= ++golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= ++golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= ++golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= ++golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= ++golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= ++golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= ++golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= ++golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= ++golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= ++golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= ++golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= + golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= ++golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys 
v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= + golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= + golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= ++golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= ++golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= ++golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= + golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= ++golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= + golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= + golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= ++golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= + golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= + golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= + golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= ++golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= ++golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= ++golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= ++golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= ++golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= + golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= ++golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= ++golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= + golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= ++golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= + golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= ++golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= + golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= ++golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
++golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= + golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= ++golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= + golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= ++golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= ++golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= ++golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= ++golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= + golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= ++golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= ++golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= ++golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= ++golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= + golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= ++golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= ++golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= ++golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= + golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= ++golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= ++golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= ++golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= + golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= ++golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= ++golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= ++golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= ++golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= ++golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= + golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= + golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= ++golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= ++golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
++golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= + golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= + golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= + golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= ++golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= + golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= ++golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= ++golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= ++golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= ++golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= ++golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= ++gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= ++gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= ++gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= ++gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= ++gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= ++gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= ++gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= ++gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= ++google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= ++google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= ++google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= ++google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= ++google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= ++google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= ++google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= ++google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= ++google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= ++google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= ++google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= ++google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= ++google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= ++google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= ++google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
++google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= ++google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= ++google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= ++google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= ++google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= ++google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= ++google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= ++google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= ++google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= ++google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= ++google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= ++google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= ++google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= ++google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= ++google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= ++google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= ++google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= ++google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= ++google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= ++google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= ++google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= ++google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= ++google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= ++google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= ++google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= ++google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= ++google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= ++google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= ++google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= ++google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= ++google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= ++google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= ++google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= ++google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= ++google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= ++google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= ++google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= ++google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= ++google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= ++google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= ++google.golang.org/api v0.111.0/go.mod 
h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= ++google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= ++google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= ++google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= ++google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= ++google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= + google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= ++google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= ++google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= ++google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= ++google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= + google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= ++google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= ++google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= ++google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= ++google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= ++google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= ++google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= ++google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= ++google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= ++google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= ++google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= ++google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= ++google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= ++google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= ++google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= ++google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= ++google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= ++google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= ++google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= ++google.golang.org/genproto 
v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= ++google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= ++google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= ++google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= ++google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= ++google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= ++google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= ++google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= ++google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= ++google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= ++google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= ++google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= ++google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= ++google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= ++google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= ++google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= ++google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= ++google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= ++google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
++google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= ++google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= ++google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= ++google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= ++google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= ++google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= ++google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= ++google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= ++google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= ++google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= ++google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= ++google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= ++google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= ++google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= ++google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= ++google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= ++google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= ++google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= ++google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= ++google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= ++google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= ++google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= ++google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= ++google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= ++google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= ++google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= ++google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= ++google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= ++google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= ++google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= ++google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= ++google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= ++google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= ++google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= ++google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= ++google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= ++google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= ++google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= ++google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= ++google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= ++google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= ++google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= ++google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= ++google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= ++google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= ++google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= 
++google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= ++google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= ++google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= ++google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= ++google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= ++google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= ++google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= ++google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= ++google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= ++google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= ++google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= ++google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= ++google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= ++google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= ++google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= ++google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= ++google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= ++google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= ++google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= ++google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= ++google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= ++google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= ++google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= ++google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= ++google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= ++google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= ++google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= ++google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= ++google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= ++google.golang.org/grpc v1.46.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= ++google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= ++google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= ++google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= ++google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= ++google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= ++google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= ++google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= ++google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= ++google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= ++google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= ++google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= ++google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -296,22 +1637,30 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 + google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= + google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= + google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= ++google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= ++google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= ++google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= ++google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= ++google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= ++gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= ++gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +@@ -323,7 +1672,13 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ++honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ++honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= + honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ++honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= ++honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= ++honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= ++honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= + k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= + k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= + k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +@@ -334,6 +1689,44 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhkl + k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= ++lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= ++lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= ++modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= ++modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= ++modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= ++modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= ++modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= ++modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= ++modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= ++modernc.org/ccgo/v3 v3.16.8/go.mod 
h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= ++modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= ++modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= ++modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= ++modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= ++modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= ++modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= ++modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= ++modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= ++modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= ++modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= ++modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= ++modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= ++modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= ++modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= ++modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= ++modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= ++modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= ++modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= ++modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= ++modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= ++modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= ++modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= ++modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= ++modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= ++rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= ++rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= ++rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= ++rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod +index bf08cceef01..e5e1a4a1257 100644 +--- a/staging/src/k8s.io/code-generator/go.mod ++++ b/staging/src/k8s.io/code-generator/go.mod +@@ -16,26 +16,29 @@ require ( + + require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // 
indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.4.0 // indirect + github.com/onsi/gomega v1.23.0 // indirect +- github.com/stretchr/testify v1.8.1 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect ++ github.com/stretchr/testify v1.8.4 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/tools v0.12.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect ++ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum +index 569288a7e2c..5921f397072 100644 +--- a/staging/src/k8s.io/code-generator/go.sum ++++ b/staging/src/k8s.io/code-generator/go.sum +@@ -17,8 +17,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -51,8 +51,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -66,6 +66,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -81,28 +84,28 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ + github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark 
v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -185,8 +188,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod +index 8df337d5d6c..4895a76f2cd 100644 +--- a/staging/src/k8s.io/component-base/go.mod ++++ b/staging/src/k8s.io/component-base/go.mod +@@ -6,9 +6,9 @@ go 1.19 + + require ( + github.com/blang/semver/v4 v4.0.0 +- github.com/go-logr/logr v1.2.3 ++ github.com/go-logr/logr v1.3.0 + github.com/go-logr/zapr v1.2.3 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 +@@ -16,12 +16,12 @@ require ( + github.com/prometheus/procfs v0.8.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 ++ github.com/stretchr/testify v1.8.4 ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 + go.uber.org/zap v1.19.0 + golang.org/x/sys v0.18.0 + k8s.io/apimachinery v0.0.0 +@@ -34,7 +34,7 @@ require ( + require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +@@ -47,7 +47,7 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + 
github.com/json-iterator/go v1.1.12 // indirect +@@ -57,22 +57,20 @@ require ( + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum +index 8347bcbfe5f..91b415362a5 100644 +--- a/staging/src/k8s.io/component-base/go.sum ++++ b/staging/src/k8s.io/component-base/go.sum +@@ -13,148 +13,31 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod 
h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod 
h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +-cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +-cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod 
h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod 
h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -173,11 +56,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + 
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -187,13 +69,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +@@ -209,17 +85,13 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -233,8 +105,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -252,9 +124,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -299,9 +170,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -315,14 +185,12 @@ github.com/google/pprof 
v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -348,6 +216,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -375,7 +245,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -416,11 +285,12 @@ github.com/prometheus/procfs v0.8.0 
h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= + github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +@@ -429,7 +299,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -437,10 +306,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -450,30 +317,27 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod 
h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +@@ -545,7 +409,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -557,10 +420,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -571,7 +433,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -606,9 +468,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -625,7 +485,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -731,7 +590,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -739,13 +597,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto 
v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -758,12 +615,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -776,21 +629,20 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod +index 3a8a6d2c254..872ca6b4938 100644 +--- a/staging/src/k8s.io/component-helpers/go.mod ++++ b/staging/src/k8s.io/component-helpers/go.mod +@@ -5,7 +5,7 @@ module k8s.io/component-helpers + go 1.19 + + require ( +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -17,7 +17,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect +@@ -33,7 +33,7 @@ require ( + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum +index 7347e9022b6..3205c4d1ffa 100644 +--- 
a/staging/src/k8s.io/component-helpers/go.sum ++++ b/staging/src/k8s.io/component-helpers/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -21,8 +22,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -57,13 +58,13 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +@@ -74,6 +75,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + 
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -92,7 +95,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -104,6 +106,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -111,8 +115,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -138,8 +142,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 
h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -199,8 +203,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod +index a35c4d58d76..49eac8b920e 100644 +--- a/staging/src/k8s.io/controller-manager/go.mod ++++ b/staging/src/k8s.io/controller-manager/go.mod +@@ -6,8 +6,8 @@ go 1.19 + + require ( + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 +- golang.org/x/oauth2 v0.7.0 ++ github.com/stretchr/testify v1.8.4 ++ golang.org/x/oauth2 v0.11.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -22,7 +22,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect +@@ -31,7 +31,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -41,11 +41,11 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + 
github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -66,16 +66,15 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect +@@ -86,10 +85,10 @@ require ( + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum +index c1c49c1cce6..e51753268ad 100644 +--- a/staging/src/k8s.io/controller-manager/go.sum ++++ b/staging/src/k8s.io/controller-manager/go.sum +@@ -13,151 +13,149 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + 
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod 
h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod 
h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod 
h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod 
h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= 
+-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= 
+ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -178,11 +176,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -193,12 +190,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -222,12 +215,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -250,8 +242,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -269,9 +261,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -319,9 +310,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -336,8 +326,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -349,8 +339,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -379,6 +369,8 @@ 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -405,7 +397,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -448,6 +439,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -456,7 +449,6 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE + github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -465,7 +457,6 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -473,10 +464,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -507,32 +496,30 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= 
+-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +@@ -620,10 +607,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -806,13 +792,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -828,11 +813,9 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -845,15 +828,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod +index 81e68d95b8a..09b5b205a3c 100644 +--- a/staging/src/k8s.io/cri-api/go.mod ++++ b/staging/src/k8s.io/cri-api/go.mod +@@ -6,22 +6,23 @@ go 1.19 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 +- google.golang.org/grpc v1.56.3 ++ github.com/stretchr/testify v1.8.4 ++ google.golang.org/grpc v1.59.0 + ) + + require 
( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/kr/text v0.2.0 // indirect +- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect ++ github.com/google/go-cmp v0.6.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect +- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect ++ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + ) + +diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum +index c257686a650..b25ec441ed9 100644 +--- a/staging/src/k8s.io/cri-api/go.sum ++++ b/staging/src/k8s.io/cri-api/go.sum +@@ -1,40 +1,40 @@ +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -50,10 +50,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -74,16 +75,16 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod +index 62ef8a578b6..02fd7e4d9e4 100644 +--- a/staging/src/k8s.io/csi-translation-lib/go.mod ++++ b/staging/src/k8s.io/csi-translation-lib/go.mod +@@ -5,7 +5,7 @@ module k8s.io/csi-translation-lib + go 1.19 + 
+ require ( +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/klog/v2 v2.80.1 +@@ -13,10 +13,10 @@ require ( + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect +diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum +index 81deab325b3..bc963ee929a 100644 +--- a/staging/src/k8s.io/csi-translation-lib/go.sum ++++ b/staging/src/k8s.io/csi-translation-lib/go.sum +@@ -5,23 +5,25 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +@@ -31,23 +33,20 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ + github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -86,14 +85,13 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod +index 96206991e30..9b75bf3b7b2 100644 +--- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod ++++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod +@@ -5,10 +5,10 @@ module k8s.io/dynamic-resource-allocation + go 1.19 + + require ( +- github.com/go-logr/logr v1.2.3 +- github.com/google/go-cmp v0.5.9 +- github.com/stretchr/testify v1.8.1 +- google.golang.org/grpc v1.56.3 ++ github.com/go-logr/logr v1.3.0 ++ github.com/google/go-cmp v0.6.0 ++ github.com/stretchr/testify v1.8.4 ++ google.golang.org/grpc v1.59.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -37,13 +37,13 @@ require ( + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum +index 5e7564b4c87..94c8da4034f 100644 +--- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum ++++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum +@@ -1,5 +1,5 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +@@ -24,14 +24,14 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg + github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= + github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 
+ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -43,7 +43,7 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +@@ -68,13 +68,13 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 
+-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +@@ -86,6 +86,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -105,7 +107,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -121,20 +122,19 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -160,13 +160,14 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -203,14 +204,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod 
h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -226,8 +228,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod +index 2e002392777..502f53b1149 100644 +--- a/staging/src/k8s.io/kms/go.mod ++++ b/staging/src/k8s.io/kms/go.mod +@@ -6,15 +6,16 @@ go 1.19 + + require ( + github.com/gogo/protobuf v1.3.2 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/grpc v1.59.0 + ) + + require ( + github.com/golang/protobuf v1.5.4 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + ) + +diff --git a/staging/src/k8s.io/kms/go.sum b/staging/src/k8s.io/kms/go.sum +index 3930f4f9ce1..31ee3d6b05c 100644 +--- a/staging/src/k8s.io/kms/go.sum ++++ b/staging/src/k8s.io/kms/go.sum +@@ -1,19 +1,19 @@ +-cloud.google.com/go/compute v1.19.1/go.mod 
h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -31,10 +31,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -55,10 +56,11 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod +index 5fcb45daf4f..51d4337b998 100644 +--- a/staging/src/k8s.io/kube-aggregator/go.mod ++++ b/staging/src/k8s.io/kube-aggregator/go.mod +@@ -8,11 +8,11 @@ require ( + github.com/davecgh/go-spew v1.1.1 + github.com/emicklei/go-restful/v3 v3.9.0 + github.com/gogo/protobuf v1.3.2 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.23.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -31,14 +31,14 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + 
github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -47,9 +47,9 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -70,22 +70,21 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/mod v0.12.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect +@@ -93,10 +92,10 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.12.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum 
+index 99830cc77ae..d4b58d5fb94 100644 +--- a/staging/src/k8s.io/kube-aggregator/go.sum ++++ b/staging/src/k8s.io/kube-aggregator/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= 
++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= 
+-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc 
v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation 
v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod 
h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -179,11 +177,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -194,12 +191,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go 
v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -223,12 +216,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -252,8 +244,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -271,9 +263,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog 
v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -321,9 +312,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -338,8 +328,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -351,8 +341,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod 
h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -381,6 +371,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -408,7 +400,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -451,6 +442,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -459,7 +452,6 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE + github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -468,7 +460,6 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -476,10 +467,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -511,32 +500,30 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod 
h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod 
h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +@@ -625,10 +612,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -813,13 +799,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d 
h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -835,11 +820,9 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -852,15 +835,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git 
a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod +index 166ed635381..ed1cf38ca36 100644 +--- a/staging/src/k8s.io/kube-controller-manager/go.mod ++++ b/staging/src/k8s.io/kube-controller-manager/go.mod +@@ -11,10 +11,11 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect ++ github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + golang.org/x/net v0.23.0 // indirect +diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum +index 2d64b51b161..508150c93a3 100644 +--- a/staging/src/k8s.io/kube-controller-manager/go.sum ++++ b/staging/src/k8s.io/kube-controller-manager/go.sum +@@ -4,10 +4,11 @@ github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= ++github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +@@ -17,8 +18,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -30,14 +31,14 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/cel-go v0.12.7/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= + github.com/google/gnostic 
v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +@@ -45,6 +46,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -58,8 +61,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +@@ -69,29 +70,30 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= 
++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= + go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= + go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp 
v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +@@ -108,7 +110,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -133,14 +135,14 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod +index 143caf64e91..8d5e7f04deb 
100644 +--- a/staging/src/k8s.io/kube-proxy/go.mod ++++ b/staging/src/k8s.io/kube-proxy/go.mod +@@ -10,11 +10,12 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect ++ github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + golang.org/x/net v0.23.0 // indirect +diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum +index b036e593fac..57623b83682 100644 +--- a/staging/src/k8s.io/kube-proxy/go.sum ++++ b/staging/src/k8s.io/kube-proxy/go.sum +@@ -2,8 +2,9 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= ++github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +@@ -12,8 +13,8 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -23,19 +24,21 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -49,8 +52,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +@@ -60,24 +61,25 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +@@ -94,7 +96,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -118,14 +120,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod +index ffbf082f51a..5b77af4e5f8 100644 +--- a/staging/src/k8s.io/kube-scheduler/go.mod ++++ b/staging/src/k8s.io/kube-scheduler/go.mod +@@ -5,7 +5,7 @@ module k8s.io/kube-scheduler + go 1.19 + + require ( +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/component-base v0.0.0 +@@ -13,7 +13,7 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum +index b036e593fac..87ab23b3750 100644 +--- a/staging/src/k8s.io/kube-scheduler/go.sum ++++ b/staging/src/k8s.io/kube-scheduler/go.sum +@@ -2,7 +2,7 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
+-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +@@ -12,8 +12,8 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -23,19 +23,21 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -49,8 +51,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G + github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +@@ -60,24 +60,25 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod 
h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +@@ -94,7 +95,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -118,14 +119,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod +index f362555e4f7..f3598ff3bdc 100644 +--- a/staging/src/k8s.io/kubectl/go.mod ++++ b/staging/src/k8s.io/kubectl/go.mod +@@ -15,7 +15,7 @@ require ( + github.com/fvbommel/sortorder v1.0.1 + github.com/go-openapi/jsonreference v0.20.0 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/jonboulle/clockwork v0.2.2 + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/lithammer/dedent v1.1.0 +@@ -27,7 +27,7 @@ require ( + github.com/russross/blackfriday/v2 v2.1.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/sys v0.18.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.0.0 +@@ -52,7 +52,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/go-errors/errors v1.0.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect +@@ -60,7 +60,7 @@ require ( + github.com/google/btree v1.0.1 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect +@@ -79,7 +79,7 @@ require ( + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect +diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum +index 869ee1f8fe0..0ef57bcce34 100644 +--- a/staging/src/k8s.io/kubectl/go.sum ++++ b/staging/src/k8s.io/kubectl/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +@@ -13,7 +14,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +@@ -52,8 +53,8 @@ github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui72 + github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= + github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -97,20 +98,20 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid 
v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +@@ -125,6 +126,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -157,7 +160,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +@@ -178,6 +180,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +@@ -189,30 +193,26 @@ 
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod 
h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +@@ -241,8 +241,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -287,13 +287,12 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -309,8 +308,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod +index 99b3d8cd46f..5e3ea551494 100644 +--- a/staging/src/k8s.io/kubelet/go.mod ++++ b/staging/src/k8s.io/kubelet/go.mod +@@ -7,7 +7,7 @@ go 1.19 + require ( + github.com/gogo/protobuf v1.3.2 + golang.org/x/net v0.23.0 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/grpc v1.59.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/component-base v0.0.0 +@@ -17,9 +17,9 @@ require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -34,7 +34,7 @@ require ( + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum +index e5c6a71c274..2e074d41ffe 100644 +--- a/staging/src/k8s.io/kubelet/go.sum ++++ b/staging/src/k8s.io/kubelet/go.sum +@@ -19,7 +19,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +@@ -48,7 +48,7 @@ github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +@@ -71,9 +71,9 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +@@ -88,8 +88,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V + github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -100,7 +100,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod 
h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -141,8 +141,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -156,10 +156,10 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -182,6 +182,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv + github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -203,8 +205,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= + github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +@@ -239,6 +239,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 + github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -252,8 +254,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -263,15 +265,14 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod 
h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +@@ -354,7 +355,7 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -365,7 +366,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -522,10 +523,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc + google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -538,8 +539,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -558,8 +559,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go +index 8c2b6d290d0..5bf2985acd1 100644 +--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go ++++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go +@@ -354,7 +354,7 @@ func TestCreateBlobDisk(t *testing.T) { + }, + }, nil) + diskURI, err := b.CreateBlobDisk("datadisk", storage.StandardGRS, 10) +- expectedErr := "failed to put page blob datadisk.vhd in container vhds: storage: service returned error: StatusCode=403" ++ expectedErr := "failed to put page blob datadisk.vhd in container vhds" + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), expectedErr)) + assert.Empty(t, diskURI) +diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod +index ff558d212c1..e256fad8359 100644 +--- a/staging/src/k8s.io/legacy-cloud-providers/go.mod ++++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod +@@ -14,13 +14,13 @@ require ( + github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b + github.com/aws/aws-sdk-go v1.54.6 + github.com/golang/mock v1.6.0 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + github.com/vmware/govmomi v0.30.6 + golang.org/x/crypto v0.21.0 +- golang.org/x/oauth2 v0.7.0 +- google.golang.org/api v0.114.0 ++ golang.org/x/oauth2 v0.11.0 ++ google.golang.org/api v0.126.0 + gopkg.in/gcfg.v1 v1.2.3 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -34,7 +34,7 @@ require ( + ) + + require ( +- cloud.google.com/go/compute v1.19.1 // indirect ++ cloud.google.com/go/compute v1.23.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.1.0 // indirect +@@ -47,7 +47,7 @@ require ( + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect +@@ -58,9 +58,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/s2a-go v0.1.4 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect +- github.com/googleapis/gax-go/v2 v2.7.1 // indirect ++ github.com/googleapis/gax-go/v2 
v2.11.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -84,8 +85,8 @@ require ( + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/warnings.v0 v0.1.1 // indirect +diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum +index 2789f55e075..728b165f75c 100644 +--- a/staging/src/k8s.io/legacy-cloud-providers/go.sum ++++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum +@@ -24,22 +24,20 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc + cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= + cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= + cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= + cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= ++cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +@@ -99,7 +97,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + 
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +@@ -114,8 +112,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= ++github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +@@ -137,9 +139,10 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + 
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +@@ -158,8 +161,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -180,7 +183,7 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw + github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= + github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -234,8 +237,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -258,21 +261,23 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe + github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= ++github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= ++github.com/google/s2a-go v0.1.4/go.mod 
h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= + github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= + github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +-github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= ++github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -302,7 +307,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -330,7 +336,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -372,6 +377,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua + github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +@@ -392,10 +399,12 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= ++github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= + github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= + github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +@@ -404,6 +413,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= ++github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= + go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= + go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= + go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +@@ -416,17 +426,16 @@ go.opencensus.io v0.22.5/go.mod 
h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= + go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= + go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= + go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +@@ -438,6 +447,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= ++golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= + golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= + golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +@@ -475,6 
+485,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= + golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +@@ -517,6 +528,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +@@ -536,8 +548,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -549,6 +561,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= + golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -607,6 +621,8 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc + golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= + golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +@@ -622,6 +638,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= ++golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= + golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +@@ -682,6 +699,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= + golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +@@ -717,8 +735,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv + google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= + google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= + google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +-google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= ++google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= ++google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= + google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +@@ -785,12 +803,13 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc + google.golang.org/genproto 
v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= + google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= + google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -816,8 +835,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ + google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +@@ -838,8 +858,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= + gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod +index 9de813f9ead..86f5f78836c 100644 +--- a/staging/src/k8s.io/metrics/go.mod ++++ b/staging/src/k8s.io/metrics/go.mod +@@ -6,7 +6,7 @@ go 1.19 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -17,13 +17,13 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -36,7 +36,7 @@ require ( + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum +index 5066384989a..e8ae2bc238e 100644 +--- a/staging/src/k8s.io/metrics/go.sum ++++ b/staging/src/k8s.io/metrics/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -22,8 +23,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH + github.com/evanphx/json-patch 
v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -58,12 +59,12 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +@@ -74,6 +75,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -92,7 +95,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 
+-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -104,19 +106,18 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -144,8 +145,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -208,8 +209,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/mount-utils/go.mod b/staging/src/k8s.io/mount-utils/go.mod +index a577e9decd7..a0ac7288a8c 100644 +--- a/staging/src/k8s.io/mount-utils/go.mod ++++ b/staging/src/k8s.io/mount-utils/go.mod +@@ -6,19 +6,19 @@ go 1.19 + + require ( + github.com/moby/sys/mountinfo v0.6.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/klog/v2 v2.80.1 + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d + ) + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect +- github.com/kr/text v0.2.0 // indirect +- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect ++ github.com/go-logr/logr v1.3.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/sys v0.18.0 // indirect +- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect ++ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + ) + +diff --git a/staging/src/k8s.io/mount-utils/go.sum b/staging/src/k8s.io/mount-utils/go.sum +index 82804a95abb..ca53240869d 100644 +--- a/staging/src/k8s.io/mount-utils/go.sum ++++ b/staging/src/k8s.io/mount-utils/go.sum +@@ -1,34 +1,33 @@ + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= ++github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + 
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= + github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= + golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod +index 1220f9c990d..09bf7ca27f2 100644 +--- a/staging/src/k8s.io/pod-security-admission/go.mod ++++ b/staging/src/k8s.io/pod-security-admission/go.mod +@@ -6,10 +6,10 @@ go 1.19 + + 
require ( + github.com/blang/semver/v4 v4.0.0 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -24,7 +24,7 @@ require ( + github.com/NYTimes/gziphandler v1.1.1 // indirect + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect +@@ -33,7 +33,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect +@@ -45,9 +45,9 @@ require ( + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -67,32 +67,31 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ 
golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum +index 8e086d588de..9b8bba0a778 100644 +--- a/staging/src/k8s.io/pod-security-admission/go.sum ++++ b/staging/src/k8s.io/pod-security-admission/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= 
++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= 
++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= 
+-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod 
h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement 
v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= 
++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod 
h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -179,11 +177,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 
h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -194,12 +191,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -223,12 +216,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -251,8 +243,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -271,9 +263,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -321,9 +312,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -338,8 +328,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 
h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -351,8 +341,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -381,6 +371,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -407,7 +399,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -450,6 +441,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -458,7 +451,6 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE + github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -467,7 +459,6 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +@@ -475,10 +466,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -509,32 +498,30 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io 
v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= 
++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +@@ -622,10 +609,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -808,13 +794,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod 
h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -830,11 +815,9 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -847,15 +830,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod +index c6a9be5f61e..e01d860eead 100644 +--- a/staging/src/k8s.io/sample-apiserver/go.mod ++++ b/staging/src/k8s.io/sample-apiserver/go.mod +@@ -21,7 +21,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect +@@ -30,7 +30,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect +@@ -40,10 +40,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -63,23 +63,22 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.5 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect +@@ -87,10 +86,10 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.12.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum +index 593e54e214b..220ea1166a7 100644 +--- a/staging/src/k8s.io/sample-apiserver/go.sum ++++ b/staging/src/k8s.io/sample-apiserver/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect 
v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod 
h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod 
h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot 
v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod 
h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= 
+-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video 
v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +@@ -179,11 +177,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks 
v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -194,12 +191,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -223,12 +216,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane 
v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -252,8 +244,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +@@ -271,9 +263,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -321,9 +312,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 
+-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -338,8 +328,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -351,8 +341,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -381,6 +371,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -407,7 +399,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8m + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -450,6 +441,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -458,7 +451,6 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE + github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -473,8 +465,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -506,32 +498,30 @@ go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric 
v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= ++go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= ++go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +@@ -620,10 +610,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -808,13 +797,12 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -830,11 +818,9 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= + google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= + google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= + google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -847,15 +833,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod +index 0ff09f42f4e..6cfd022fe12 100644 +--- a/staging/src/k8s.io/sample-cli-plugin/go.mod ++++ b/staging/src/k8s.io/sample-cli-plugin/go.mod +@@ -16,7 +16,7 @@ require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/go-errors/errors v1.0.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect +@@ -24,10 +24,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect +@@ -45,7 +45,7 @@ require ( + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum +index 670fb570d6b..a0b02692b38 100644 +--- a/staging/src/k8s.io/sample-cli-plugin/go.sum ++++ b/staging/src/k8s.io/sample-cli-plugin/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= 
++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -27,8 +28,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= + github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -64,15 +65,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +@@ -87,6 +88,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + 
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -109,7 +112,6 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= + github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +@@ -120,6 +122,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= + github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +@@ -138,8 +142,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -169,8 +173,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod 
h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -231,8 +235,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod +index b5040f1c151..a03741de594 100644 +--- a/staging/src/k8s.io/sample-controller/go.mod ++++ b/staging/src/k8s.io/sample-controller/go.mod +@@ -16,7 +16,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect +@@ -24,7 +24,7 @@ require ( + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -37,7 +37,7 @@ require ( + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.23.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum +index cb2eb264896..04f3a607b6b 100644 +--- 
a/staging/src/k8s.io/sample-controller/go.sum ++++ b/staging/src/k8s.io/sample-controller/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -22,8 +23,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= + github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +@@ -59,13 +60,13 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +@@ -77,6 +78,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -95,7 +98,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= + github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +@@ -107,6 +109,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -114,8 +118,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -143,8 +147,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= + golang.org/x/net v0.23.0/go.mod 
h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -207,8 +211,9 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= ++gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go +index a5b020992b8..63955370032 100644 +--- a/vendor/cloud.google.com/go/compute/internal/version.go ++++ b/vendor/cloud.google.com/go/compute/internal/version.go +@@ -15,4 +15,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "1.19.1" ++const Version = "1.23.0" +diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml +deleted file mode 100644 +index c79105c2fbe..00000000000 +--- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml ++++ /dev/null +@@ -1,10 +0,0 @@ +-language: go +-go: +- - 1.13 +- - 1.x +- - tip +-before_install: +- - go get github.com/mattn/goveralls +- - go get golang.org/x/tools/cmd/cover +-script: +- - $HOME/gopath/bin/goveralls -service=travis-ci +diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go +index 1ce2507ebc8..b9c0c51cd75 100644 +--- a/vendor/github.com/cenkalti/backoff/v4/retry.go ++++ b/vendor/github.com/cenkalti/backoff/v4/retry.go +@@ -5,10 +5,20 @@ import ( + "time" + ) + ++// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). ++// The operation will be retried using a backoff policy if it returns an error. ++type OperationWithData[T any] func() (T, error) ++ + // An Operation is executing by Retry() or RetryNotify(). + // The operation will be retried using a backoff policy if it returns an error. 
+ type Operation func() error + ++func (o Operation) withEmptyData() OperationWithData[struct{}] { ++ return func() (struct{}, error) { ++ return struct{}{}, o() ++ } ++} ++ + // Notify is a notify-on-error function. It receives an operation error and + // backoff delay if the operation failed (with an error). + // +@@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) + } + ++// RetryWithData is like Retry but returns data in the response too. ++func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { ++ return RetryNotifyWithData(o, b, nil) ++} ++ + // RetryNotify calls notify function with the error and wait duration + // for each failed attempt before sleep. + func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) + } + ++// RetryNotifyWithData is like RetryNotify but returns data in the response too. ++func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { ++ return doRetryNotify(operation, b, notify, nil) ++} ++ + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer + // for each failed attempt before sleep. + // A default timer that uses system timer is used when nil is passed. + func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { +- var err error +- var next time.Duration ++ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) ++ return err ++} ++ ++// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. ++func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { ++ return doRetryNotify(operation, b, notify, t) ++} ++ ++func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { ++ var ( ++ err error ++ next time.Duration ++ res T ++ ) + if t == nil { + t = &defaultTimer{} + } +@@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer + + b.Reset() + for { +- if err = operation(); err == nil { +- return nil ++ res, err = operation() ++ if err == nil { ++ return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { +- return permanent.Err ++ return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { +- return cerr ++ return res, cerr + } + +- return err ++ return res, err + } + + if notify != nil { +@@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer + + select { + case <-ctx.Done(): +- return ctx.Err() ++ return res, ctx.Err() + case <-t.C(): + } + } +diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml +index 94ff801df1a..0cffafa7bf9 100644 +--- a/vendor/github.com/go-logr/logr/.golangci.yaml ++++ b/vendor/github.com/go-logr/logr/.golangci.yaml +@@ -6,7 +6,6 @@ linters: + disable-all: true + enable: + - asciicheck +- - deadcode + - errcheck + - forcetypeassert + - gocritic +@@ -18,10 +17,8 @@ linters: + - misspell + - revive + - staticcheck +- - structcheck + - typecheck + - unused +- - varcheck + + issues: + exclude-use-default: false +diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md +index ab593118131..a8c29bfbd53 100644 +--- a/vendor/github.com/go-logr/logr/README.md ++++ 
b/vendor/github.com/go-logr/logr/README.md +@@ -1,6 +1,7 @@ + # A minimal logging API for Go + + [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) ++[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) + + logr offers an(other) opinion on how Go programs and libraries can do logging + without becoming coupled to a particular logging implementation. This is not +@@ -73,6 +74,29 @@ received: + If the Go standard library had defined an interface for logging, this project + probably would not be needed. Alas, here we are. + ++When the Go developers started developing such an interface with ++[slog](https://github.com/golang/go/issues/56345), they adopted some of the ++logr design but also left out some parts and changed others: ++ ++| Feature | logr | slog | ++|---------|------|------| ++| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | ++| Low-level API | `LogSink` | `Handler` | ++| Stack unwinding | done by `LogSink` | done by `Logger` | ++| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | ++| Generating a value for logging on demand | `Marshaler` | `LogValuer` | ++| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | ++| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | ++| Passing logger via context | `NewContext`, `FromContext` | no API | ++| Adding a name to a logger | `WithName` | no API | ++| Modify verbosity of log entries in a call chain | `V` | no API | ++| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | ++ ++The high-level slog API is explicitly meant to be one of many different APIs ++that can be layered on top of a shared `slog.Handler`. logr is one such ++alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) ++package. ++ + ### Inspiration + + Before you consider this package, please read [this blog post by the +@@ -118,6 +142,91 @@ There are implementations for the following logging libraries: + - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) + - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + ++## slog interoperability ++ ++Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` ++and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and ++`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. ++As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level ++slog API. `slogr` itself leaves that to the caller. ++ ++## Using a `logr.Sink` as backend for slog ++ ++Ideally, a logr sink implementation should support both logr and slog by ++implementing both the normal logr interface(s) and `slogr.SlogSink`. Because ++of a conflict in the parameters of the common `Enabled` method, it is [not ++possible to implement both slog.Handler and logr.Sink in the same ++type](https://github.com/golang/go/issues/59110). 
++ ++If both are supported, log calls can go from the high-level APIs to the backend ++without the need to convert parameters. `NewLogr` and `NewSlogHandler` can ++convert back and forth without adding additional wrappers, with one exception: ++when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then ++`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future ++log calls. ++ ++Such an implementation should also support values that implement specific ++interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, ++`slog.GroupValue`). logr does not convert those. ++ ++Not supporting slog has several drawbacks: ++- Recording source code locations works correctly if the handler gets called ++ through `slog.Logger`, but may be wrong in other cases. That's because a ++ `logr.Sink` does its own stack unwinding instead of using the program counter ++ provided by the high-level API. ++- slog levels <= 0 can be mapped to logr levels by negating the level without a ++ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as ++ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink ++ because logr does not support "more important than info" levels. ++- The slog group concept is supported by prefixing each key in a key/value ++ pair with the group names, separated by a dot. For structured output like ++ JSON it would be better to group the key/value pairs inside an object. ++- Special slog values and interfaces don't work as expected. ++- The overhead is likely to be higher. ++ ++These drawbacks are severe enough that applications using a mixture of slog and ++logr should switch to a different backend. ++ ++## Using a `slog.Handler` as backend for logr ++ ++Using a plain `slog.Handler` without support for logr works better than the ++other direction: ++- All logr verbosity levels can be mapped 1:1 to their corresponding slog level ++ by negating them. ++- Stack unwinding is done by the `slogr.SlogSink` and the resulting program ++ counter is passed to the `slog.Handler`. ++- Names added via `Logger.WithName` are gathered and recorded in an additional ++ attribute with `logger` as key and the names separated by slash as value. ++- `Logger.Error` is turned into a log record with `slog.LevelError` as level ++ and an additional attribute with `err` as key, if an error was provided. ++ ++The main drawback is that `logr.Marshaler` will not be supported. Types should ++ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility ++with logr implementations without slog support is not important, then ++`slog.Valuer` is sufficient. ++ ++## Context support for slog ++ ++Storing a logger in a `context.Context` is not supported by ++slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this ++to fill this gap: ++ ++ func HandlerFromContext(ctx context.Context) slog.Handler { ++ logger, err := logr.FromContext(ctx) ++ if err == nil { ++ return slogr.NewSlogHandler(logger) ++ } ++ return slog.Default().Handler() ++ } ++ ++ func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { ++ return logr.NewContext(ctx, slogr.NewLogr(handler)) ++ } ++ ++The downside is that storing and retrieving a `slog.Handler` needs more ++allocations compared to using a `logr.Logger`. Therefore the recommendation is ++to use the `logr.Logger` API in code which uses contextual logging. 
++ + ## FAQ + + ### Conceptual +@@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", + + Then gradually choose levels in between as you need them, working your way + down from 10 (for debug and trace style logs) and up from 1 (for chattier +-info-type logs.) ++info-type logs). For reference, slog pre-defines -4 for debug logs ++(corresponds to 4 in logr), which matches what is ++[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). + + #### How do I choose my keys? + +diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md +new file mode 100644 +index 00000000000..1ca756fc7b3 +--- /dev/null ++++ b/vendor/github.com/go-logr/logr/SECURITY.md +@@ -0,0 +1,18 @@ ++# Security Policy ++ ++If you have discovered a security vulnerability in this project, please report it ++privately. **Do not disclose it as a public issue.** This gives us time to work with you ++to fix the issue before public exposure, reducing the chance that the exploit will be ++used before a patch is released. ++ ++You may submit the report in the following ways: ++ ++- send an email to go-logr-security@googlegroups.com ++- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) ++ ++Please provide the following information in your report: ++ ++- A description of the vulnerability and its impact ++- How to reproduce the issue ++ ++We ask that you give us 90 days to work on a fix before public exposure. +diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go +index 9d92a38f1d7..99fe8be93c1 100644 +--- a/vendor/github.com/go-logr/logr/discard.go ++++ b/vendor/github.com/go-logr/logr/discard.go +@@ -20,35 +20,5 @@ package logr + // used whenever the caller is not interested in the logs. Logger instances + // produced by this function always compare as equal. + func Discard() Logger { +- return Logger{ +- level: 0, +- sink: discardLogSink{}, +- } +-} +- +-// discardLogSink is a LogSink that discards all messages. +-type discardLogSink struct{} +- +-// Verify that it actually implements the interface +-var _ LogSink = discardLogSink{} +- +-func (l discardLogSink) Init(RuntimeInfo) { +-} +- +-func (l discardLogSink) Enabled(int) bool { +- return false +-} +- +-func (l discardLogSink) Info(int, string, ...interface{}) { +-} +- +-func (l discardLogSink) Error(error, string, ...interface{}) { +-} +- +-func (l discardLogSink) WithValues(...interface{}) LogSink { +- return l +-} +- +-func (l discardLogSink) WithName(string) LogSink { +- return l ++ return New(nil) + } +diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go +index 7accdb0c400..12e5807cc5c 100644 +--- a/vendor/github.com/go-logr/logr/funcr/funcr.go ++++ b/vendor/github.com/go-logr/logr/funcr/funcr.go +@@ -21,13 +21,13 @@ limitations under the License. + // github.com/go-logr/logr.LogSink with output through an arbitrary + // "write" function. See New and NewJSON for details. + // +-// Custom LogSinks ++// # Custom LogSinks + // + // For users who need more control, a funcr.Formatter can be embedded inside + // your own custom LogSink implementation. This is useful when the LogSink + // needs to implement additional methods, for example. 
+ // +-// Formatting ++// # Formatting + // + // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for + // values which are being logged. When rendering a struct, funcr will use Go's +@@ -37,6 +37,7 @@ package funcr + import ( + "bytes" + "encoding" ++ "encoding/json" + "fmt" + "path/filepath" + "reflect" +@@ -115,17 +116,17 @@ type Options struct { + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). +- RenderBuiltinsHook func(kvList []interface{}) []interface{} ++ RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. +- RenderValuesHook func(kvList []interface{}) []interface{} ++ RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. +- RenderArgsHook func(kvList []interface{}) []interface{} ++ RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, +@@ -162,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { + return &l + } + +-func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { ++func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l + } +@@ -172,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + return &l + } + +-func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { ++func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) + } + +-func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { ++func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) + } +@@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { + prefix: "", + values: nil, + depth: 0, +- opts: opts, ++ opts: &opts, + } + return f + } +@@ -228,10 +229,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { + type Formatter struct { + outputFormat outputFormat + prefix string +- values []interface{} ++ values []any + valuesStr string + depth int +- opts Options ++ opts *Options + } + + // outputFormat indicates which outputFormat to use. +@@ -245,10 +246,10 @@ const ( + ) + + // PseudoStruct is a list of key-value pairs that gets logged as a struct. +-type PseudoStruct []interface{} ++type PseudoStruct []any + + // render produces a log line, ready to use. +-func (f Formatter) render(builtins, args []interface{}) string { ++func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { +@@ -291,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { + // This function returns a potentially modified version of kvList, which + // ensures that there is a value for every key (adding a value if needed) and + // that each key is a string (substituting a key if needed). 
+-func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
++func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+@@ -333,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
+ return kvList
+ }
+ 
+-func (f Formatter) pretty(value interface{}) string {
++func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+ }
+ 
+@@ -342,7 +343,7 @@ const (
+ )
+ 
+ // TODO: This is not fast. Most of the overhead goes here.
+-func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
++func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
++ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+@@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+- if i > 0 {
++ if printComma {
+ buf.WriteByte(',')
+ }
++ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+@@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
++ // If this is outputing as JSON make sure this isn't really a json.RawMessage.
++ // If so just emit "as-is" and don't pretty it as that will just print
++ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
++ if f.outputFormat == outputJSON {
++ if rm, ok := value.(json.RawMessage); ok {
++ // If it's empty make sure we emit an empty value as the array style would below.
++ if len(rm) > 0 {
++ buf.Write(rm)
++ } else {
++ buf.WriteString("null")
++ }
++ return buf.String()
++ }
++ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+@@ -597,7 +614,7 @@ func isEmpty(v reflect.Value) bool {
+ return false
+ }
+ 
+-func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
++func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+@@ -658,12 +675,12 @@ func (f Formatter) caller() Caller {
+ 
+ const noValue = "<no-value>"
+ 
+-func (f Formatter) nonStringKey(v interface{}) string {
++func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+ }
+ 
+ // snippet produces a short snippet string of an arbitrary value.
+-func (f Formatter) snippet(v interface{}) string {
++func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+ 
+ snip := f.pretty(v)
+@@ -676,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string {
+ // sanitize ensures that a list of key-value pairs has a value for every key
+ // (adding a value if needed) and that each key is a string (substituting a key
+ // if needed).
+-func (f Formatter) sanitize(kvList []interface{}) []interface{} { ++func (f Formatter) sanitize(kvList []any) []any { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } +@@ -710,8 +727,8 @@ func (f Formatter) GetDepth() int { + // FormatInfo renders an Info log message into strings. The prefix will be + // empty when no names were set (via AddNames), or when the output is + // configured for JSON. +-func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { +- args := make([]interface{}, 0, 64) // using a constant here impacts perf ++func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { ++ args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) +@@ -728,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref + } + + // FormatError renders an Error log message into strings. The prefix will be +-// empty when no names were set (via AddNames), or when the output is ++// empty when no names were set (via AddNames), or when the output is + // configured for JSON. +-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { +- args := make([]interface{}, 0, 64) // using a constant here impacts perf ++func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { ++ args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) +@@ -744,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) +- var loggableErr interface{} ++ var loggableErr any + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) +- return f.prefix, f.render(args, kvList) ++ return prefix, f.render(args, kvList) + } + + // AddName appends the specified name. funcr uses '/' characters to separate +@@ -764,7 +781,7 @@ func (f *Formatter) AddName(name string) { + + // AddValues adds key-value pairs to the set of saved values to be logged with + // each log line. +-func (f *Formatter) AddValues(kvList []interface{}) { ++func (f *Formatter) AddValues(kvList []any) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) +diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go +index c3b56b3d2c5..2a5075a180f 100644 +--- a/vendor/github.com/go-logr/logr/logr.go ++++ b/vendor/github.com/go-logr/logr/logr.go +@@ -21,7 +21,7 @@ limitations under the License. + // to back that API. Packages in the Go ecosystem can depend on this package, + // while callers can implement logging with whatever backend is appropriate. + // +-// Usage ++// # Usage + // + // Logging is done using a Logger instance. Logger is a concrete type with + // methods, which defers the actual logging to a LogSink interface. The main +@@ -30,16 +30,20 @@ limitations under the License. + // "structured logging". 
+ // + // With Go's standard log package, we might write: +-// log.Printf("setting target value %s", targetValue) ++// ++// log.Printf("setting target value %s", targetValue) + // + // With logr's structured logging, we'd write: +-// logger.Info("setting target", "value", targetValue) ++// ++// logger.Info("setting target", "value", targetValue) + // + // Errors are much the same. Instead of: +-// log.Printf("failed to open the pod bay door for user %s: %v", user, err) ++// ++// log.Printf("failed to open the pod bay door for user %s: %v", user, err) + // + // We'd write: +-// logger.Error(err, "failed to open the pod bay door", "user", user) ++// ++// logger.Error(err, "failed to open the pod bay door", "user", user) + // + // Info() and Error() are very similar, but they are separate methods so that + // LogSink implementations can choose to do things like attach additional +@@ -47,7 +51,7 @@ limitations under the License. + // always logged, regardless of the current verbosity. If there is no error + // instance available, passing nil is valid. + // +-// Verbosity ++// # Verbosity + // + // Often we want to log information only when the application in "verbose + // mode". To write log lines that are more verbose, Logger has a V() method. +@@ -58,20 +62,22 @@ limitations under the License. + // Error messages do not have a verbosity level and are always logged. + // + // Where we might have written: +-// if flVerbose >= 2 { +-// log.Printf("an unusual thing happened") +-// } ++// ++// if flVerbose >= 2 { ++// log.Printf("an unusual thing happened") ++// } + // + // We can write: +-// logger.V(2).Info("an unusual thing happened") + // +-// Logger Names ++// logger.V(2).Info("an unusual thing happened") ++// ++// # Logger Names + // + // Logger instances can have name strings so that all messages logged through + // that instance have additional context. For example, you might want to add + // a subsystem name: + // +-// logger.WithName("compactor").Info("started", "time", time.Now()) ++// logger.WithName("compactor").Info("started", "time", time.Now()) + // + // The WithName() method returns a new Logger, which can be passed to + // constructors or other functions for further use. Repeated use of WithName() +@@ -82,25 +88,27 @@ limitations under the License. + // joining operation (e.g. whitespace, commas, periods, slashes, brackets, + // quotes, etc). + // +-// Saved Values ++// # Saved Values + // + // Logger instances can store any number of key/value pairs, which will be + // logged alongside all messages logged through that instance. For example, + // you might want to create a Logger instance per managed object: + // + // With the standard log package, we might write: +-// log.Printf("decided to set field foo to value %q for object %s/%s", +-// targetValue, object.Namespace, object.Name) ++// ++// log.Printf("decided to set field foo to value %q for object %s/%s", ++// targetValue, object.Namespace, object.Name) + // + // With logr we'd write: +-// // Elsewhere: set up the logger to log the object name. +-// obj.logger = mainLogger.WithValues( +-// "name", obj.name, "namespace", obj.namespace) + // +-// // later on... +-// obj.logger.Info("setting foo", "value", targetValue) ++// // Elsewhere: set up the logger to log the object name. ++// obj.logger = mainLogger.WithValues( ++// "name", obj.name, "namespace", obj.namespace) ++// ++// // later on... 
++// obj.logger.Info("setting foo", "value", targetValue) + // +-// Best Practices ++// # Best Practices + // + // Logger has very few hard rules, with the goal that LogSink implementations + // might have a lot of freedom to differentiate. There are, however, some +@@ -119,20 +127,20 @@ limitations under the License. + // such a value can call its methods without having to check whether the + // instance is ready for use. + // +-// Calling methods with the null logger (Logger{}) as instance will crash +-// because it has no LogSink. Therefore this null logger should never be passed +-// around. For cases where passing a logger is optional, a pointer to Logger ++// The zero logger (= Logger{}) is identical to Discard() and discards all log ++// entries. Code that receives a Logger by value can simply call it, the methods ++// will never crash. For cases where passing a logger is optional, a pointer to Logger + // should be used. + // +-// Key Naming Conventions ++// # Key Naming Conventions + // + // Keys are not strictly required to conform to any specification or regex, but + // it is recommended that they: +-// * be human-readable and meaningful (not auto-generated or simple ordinals) +-// * be constant (not dependent on input data) +-// * contain only printable characters +-// * not contain whitespace or punctuation +-// * use lower case for simple keys and lowerCamelCase for more complex ones ++// - be human-readable and meaningful (not auto-generated or simple ordinals) ++// - be constant (not dependent on input data) ++// - contain only printable characters ++// - not contain whitespace or punctuation ++// - use lower case for simple keys and lowerCamelCase for more complex ones + // + // These guidelines help ensure that log data is processed properly regardless + // of the log implementation. For example, log implementations will try to +@@ -141,51 +149,54 @@ limitations under the License. + // While users are generally free to use key names of their choice, it's + // generally best to avoid using the following keys, as they're frequently used + // by implementations: +-// * "caller": the calling information (file/line) of a particular log line +-// * "error": the underlying error value in the `Error` method +-// * "level": the log level +-// * "logger": the name of the associated logger +-// * "msg": the log message +-// * "stacktrace": the stack trace associated with a particular log line or +-// error (often from the `Error` message) +-// * "ts": the timestamp for a log line ++// - "caller": the calling information (file/line) of a particular log line ++// - "error": the underlying error value in the `Error` method ++// - "level": the log level ++// - "logger": the name of the associated logger ++// - "msg": the log message ++// - "stacktrace": the stack trace associated with a particular log line or ++// error (often from the `Error` message) ++// - "ts": the timestamp for a log line + // + // Implementations are encouraged to make use of these keys to represent the + // above concepts, when necessary (for example, in a pure-JSON output form, it + // would be necessary to represent at least message and timestamp as ordinary + // named values). + // +-// Break Glass ++// # Break Glass + // + // Implementations may choose to give callers access to the underlying + // logging implementation. The recommended pattern for this is: +-// // Underlier exposes access to the underlying logging implementation. 
+-// // Since callers only have a logr.Logger, they have to know which +-// // implementation is in use, so this interface is less of an abstraction +-// // and more of way to test type conversion. +-// type Underlier interface { +-// GetUnderlying() +-// } ++// ++// // Underlier exposes access to the underlying logging implementation. ++// // Since callers only have a logr.Logger, they have to know which ++// // implementation is in use, so this interface is less of an abstraction ++// // and more of way to test type conversion. ++// type Underlier interface { ++// GetUnderlying() ++// } + // + // Logger grants access to the sink to enable type assertions like this: +-// func DoSomethingWithImpl(log logr.Logger) { +-// if underlier, ok := log.GetSink()(impl.Underlier) { +-// implLogger := underlier.GetUnderlying() +-// ... +-// } +-// } ++// ++// func DoSomethingWithImpl(log logr.Logger) { ++// if underlier, ok := log.GetSink().(impl.Underlier); ok { ++// implLogger := underlier.GetUnderlying() ++// ... ++// } ++// } + // + // Custom `With*` functions can be implemented by copying the complete + // Logger struct and replacing the sink in the copy: +-// // WithFooBar changes the foobar parameter in the log sink and returns a +-// // new logger with that modified sink. It does nothing for loggers where +-// // the sink doesn't support that parameter. +-// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +-// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +-// } +-// return log +-// } ++// ++// // WithFooBar changes the foobar parameter in the log sink and returns a ++// // new logger with that modified sink. It does nothing for loggers where ++// // the sink doesn't support that parameter. ++// func WithFoobar(log logr.Logger, foobar int) logr.Logger { ++// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { ++// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) ++// } ++// return log ++// } + // + // Don't use New to construct a new Logger with a LogSink retrieved from an + // existing Logger. Source code attribution might not work correctly and +@@ -201,11 +212,14 @@ import ( + ) + + // New returns a new Logger instance. This is primarily used by libraries +-// implementing LogSink, rather than end users. ++// implementing LogSink, rather than end users. Passing a nil sink will create ++// a Logger which discards all log lines. + func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) +- sink.Init(runtimeInfo) ++ if sink != nil { ++ sink.Init(runtimeInfo) ++ } + return logger + } + +@@ -244,7 +258,13 @@ type Logger struct { + // Enabled tests whether this Logger is enabled. For example, commandline + // flags might be used to set the logging verbosity and disable some info logs. + func (l Logger) Enabled() bool { +- return l.sink.Enabled(l.level) ++ // Some implementations of LogSink look at the caller in Enabled (e.g. ++ // different verbosity levels per package or file), but we only pass one ++ // CallDepth in (via Init). This means that all calls from Logger to the ++ // LogSink's Enabled, Info, and Error methods must have the same number of ++ // frames. In other words, Logger methods can't call other Logger methods ++ // which call these LogSink methods unless we do it the same in all paths. ++ return l.sink != nil && l.sink.Enabled(l.level) + } + + // Info logs a non-error message with the given key/value pairs as context. 
+@@ -253,8 +273,11 @@ func (l Logger) Enabled() bool { + // line. The key/value pairs can then be used to add additional variable + // information. The key/value pairs must alternate string keys and arbitrary + // values. +-func (l Logger) Info(msg string, keysAndValues ...interface{}) { +- if l.Enabled() { ++func (l Logger) Info(msg string, keysAndValues ...any) { ++ if l.sink == nil { ++ return ++ } ++ if l.sink.Enabled(l.level) { // see comment in Enabled + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } +@@ -272,7 +295,10 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { + // while the err argument should be used to attach the actual error that + // triggered this log line, if present. The err parameter is optional + // and nil may be passed instead of an error instance. +-func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { ++func (l Logger) Error(err error, msg string, keysAndValues ...any) { ++ if l.sink == nil { ++ return ++ } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } +@@ -284,6 +310,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + // level means a log message is less important. Negative V-levels are treated + // as 0. + func (l Logger) V(level int) Logger { ++ if l.sink == nil { ++ return l ++ } + if level < 0 { + level = 0 + } +@@ -291,9 +320,19 @@ func (l Logger) V(level int) Logger { + return l + } + ++// GetV returns the verbosity level of the logger. If the logger's LogSink is ++// nil as in the Discard logger, this will always return 0. ++func (l Logger) GetV() int { ++ // 0 if l.sink nil because of the if check in V above. ++ return l.level ++} ++ + // WithValues returns a new Logger instance with additional key/value pairs. + // See Info for documentation on how key/value pairs work. +-func (l Logger) WithValues(keysAndValues ...interface{}) Logger { ++func (l Logger) WithValues(keysAndValues ...any) Logger { ++ if l.sink == nil { ++ return l ++ } + l.setSink(l.sink.WithValues(keysAndValues...)) + return l + } +@@ -304,6 +343,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + // contain only letters, digits, and hyphens (see the package documentation for + // more information). + func (l Logger) WithName(name string) Logger { ++ if l.sink == nil { ++ return l ++ } + l.setSink(l.sink.WithName(name)) + return l + } +@@ -324,6 +366,9 @@ func (l Logger) WithName(name string) Logger { + // WithCallDepth(1) because it works with implementions that support the + // CallDepthLogSink and/or CallStackHelperLogSink interfaces. + func (l Logger) WithCallDepth(depth int) Logger { ++ if l.sink == nil { ++ return l ++ } + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } +@@ -345,6 +390,9 @@ func (l Logger) WithCallDepth(depth int) Logger { + // implementation does not support either of these, the original Logger will be + // returned. 
+ func (l Logger) WithCallStackHelper() (func(), Logger) { ++ if l.sink == nil { ++ return func() {}, l ++ } + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) +@@ -357,6 +405,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { + return helper, l + } + ++// IsZero returns true if this logger is an uninitialized zero value ++func (l Logger) IsZero() bool { ++ return l.sink == nil ++} ++ + // contextKey is how we find Loggers in a context.Context. + type contextKey struct{} + +@@ -427,22 +480,22 @@ type LogSink interface { + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. +- Info(level int, msg string, keysAndValues ...interface{}) ++ Info(level int, msg string, keysAndValues ...any) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. +- Error(err error, msg string, keysAndValues ...interface{}) ++ Error(err error, msg string, keysAndValues ...any) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. +- WithValues(keysAndValues ...interface{}) LogSink ++ WithValues(keysAndValues ...any) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink + } + +-// CallDepthLogSink represents a Logger that knows how to climb the call stack ++// CallDepthLogSink represents a LogSink that knows how to climb the call stack + // to identify the original call site and can offset the depth by a specified + // number of frames. This is useful for users who have helper functions + // between the "real" call site and the actual calls to Logger methods. +@@ -467,7 +520,7 @@ type CallDepthLogSink interface { + WithCallDepth(depth int) LogSink + } + +-// CallStackHelperLogSink represents a Logger that knows how to climb ++// CallStackHelperLogSink represents a LogSink that knows how to climb + // the call stack to identify the original call site and can skip + // intermediate helper functions if they mark themselves as + // helper. Go's testing package uses that approach. +@@ -506,5 +559,5 @@ type Marshaler interface { + // with exported fields + // + // It may return any value of any type. +- MarshalLog() interface{} ++ MarshalLog() any + } +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +index e54a76c7e3c..3d8d0cd3ae3 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +@@ -7,6 +7,7 @@ package cmpopts + + import ( + "errors" ++ "fmt" + "math" + "reflect" + "time" +@@ -16,10 +17,10 @@ import ( + + func equateAlways(_, _ interface{}) bool { return true } + +-// EquateEmpty returns a Comparer option that determines all maps and slices ++// EquateEmpty returns a [cmp.Comparer] option that determines all maps and slices + // with a length of zero to be equal, regardless of whether they are nil. + // +-// EquateEmpty can be used in conjunction with SortSlices and SortMaps. ++// EquateEmpty can be used in conjunction with [SortSlices] and [SortMaps]. 
+ func EquateEmpty() cmp.Option { + return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) + } +@@ -31,7 +32,7 @@ func isEmpty(x, y interface{}) bool { + (vx.Len() == 0 && vy.Len() == 0) + } + +-// EquateApprox returns a Comparer option that determines float32 or float64 ++// EquateApprox returns a [cmp.Comparer] option that determines float32 or float64 + // values to be equal if they are within a relative fraction or absolute margin. + // This option is not used when either x or y is NaN or infinite. + // +@@ -45,7 +46,7 @@ func isEmpty(x, y interface{}) bool { + // + // |x-y| ≤ max(fraction*min(|x|, |y|), margin) + // +-// EquateApprox can be used in conjunction with EquateNaNs. ++// EquateApprox can be used in conjunction with [EquateNaNs]. + func EquateApprox(fraction, margin float64) cmp.Option { + if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { + panic("margin or fraction must be a non-negative number") +@@ -73,10 +74,10 @@ func (a approximator) compareF32(x, y float32) bool { + return a.compareF64(float64(x), float64(y)) + } + +-// EquateNaNs returns a Comparer option that determines float32 and float64 ++// EquateNaNs returns a [cmp.Comparer] option that determines float32 and float64 + // NaN values to be equal. + // +-// EquateNaNs can be used in conjunction with EquateApprox. ++// EquateNaNs can be used in conjunction with [EquateApprox]. + func EquateNaNs() cmp.Option { + return cmp.Options{ + cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), +@@ -91,8 +92,8 @@ func areNaNsF32s(x, y float32) bool { + return areNaNsF64s(float64(x), float64(y)) + } + +-// EquateApproxTime returns a Comparer option that determines two non-zero +-// time.Time values to be equal if they are within some margin of one another. ++// EquateApproxTime returns a [cmp.Comparer] option that determines two non-zero ++// [time.Time] values to be equal if they are within some margin of one another. + // If both times have a monotonic clock reading, then the monotonic time + // difference will be used. The margin must be non-negative. + func EquateApproxTime(margin time.Duration) cmp.Option { +@@ -131,8 +132,8 @@ type anyError struct{} + func (anyError) Error() string { return "any error" } + func (anyError) Is(err error) bool { return err != nil } + +-// EquateErrors returns a Comparer option that determines errors to be equal +-// if errors.Is reports them to match. The AnyError error can be used to ++// EquateErrors returns a [cmp.Comparer] option that determines errors to be equal ++// if [errors.Is] reports them to match. The [AnyError] error can be used to + // match any non-nil error. + func EquateErrors() cmp.Option { + return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors)) +@@ -154,3 +155,31 @@ func compareErrors(x, y interface{}) bool { + ye := y.(error) + return errors.Is(xe, ye) || errors.Is(ye, xe) + } ++ ++// EquateComparable returns a [cmp.Option] that determines equality ++// of comparable types by directly comparing them using the == operator in Go. ++// The types to compare are specified by passing a value of that type. ++// This option should only be used on types that are documented as being ++// safe for direct == comparison. For example, [net/netip.Addr] is documented ++// as being semantically safe to use with ==, while [time.Time] is documented ++// to discourage the use of == on time values. 
++func EquateComparable(typs ...interface{}) cmp.Option { ++ types := make(typesFilter) ++ for _, typ := range typs { ++ switch t := reflect.TypeOf(typ); { ++ case !t.Comparable(): ++ panic(fmt.Sprintf("%T is not a comparable Go type", typ)) ++ case types[t]: ++ panic(fmt.Sprintf("%T is already specified", typ)) ++ default: ++ types[t] = true ++ } ++ } ++ return cmp.FilterPath(types.filter, cmp.Comparer(equateAny)) ++} ++ ++type typesFilter map[reflect.Type]bool ++ ++func (tf typesFilter) filter(p cmp.Path) bool { return tf[p.Last().Type()] } ++ ++func equateAny(x, y interface{}) bool { return x == y } +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +index 80c60617e40..fb84d11d70e 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +@@ -14,7 +14,7 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// IgnoreFields returns an Option that ignores fields of the ++// IgnoreFields returns an [cmp.Option] that ignores fields of the + // given names on a single struct type. It respects the names of exported fields + // that are forwarded due to struct embedding. + // The struct type is specified by passing in a value of that type. +@@ -26,7 +26,7 @@ func IgnoreFields(typ interface{}, names ...string) cmp.Option { + return cmp.FilterPath(sf.filter, cmp.Ignore()) + } + +-// IgnoreTypes returns an Option that ignores all values assignable to ++// IgnoreTypes returns an [cmp.Option] that ignores all values assignable to + // certain types, which are specified by passing in a value of each type. + func IgnoreTypes(typs ...interface{}) cmp.Option { + tf := newTypeFilter(typs...) +@@ -59,10 +59,10 @@ func (tf typeFilter) filter(p cmp.Path) bool { + return false + } + +-// IgnoreInterfaces returns an Option that ignores all values or references of ++// IgnoreInterfaces returns an [cmp.Option] that ignores all values or references of + // values assignable to certain interface types. These interfaces are specified + // by passing in an anonymous struct with the interface types embedded in it. +-// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. ++// For example, to ignore [sync.Locker], pass in struct{sync.Locker}{}. + func IgnoreInterfaces(ifaces interface{}) cmp.Option { + tf := newIfaceFilter(ifaces) + return cmp.FilterPath(tf.filter, cmp.Ignore()) +@@ -107,7 +107,7 @@ func (tf ifaceFilter) filter(p cmp.Path) bool { + return false + } + +-// IgnoreUnexported returns an Option that only ignores the immediate unexported ++// IgnoreUnexported returns an [cmp.Option] that only ignores the immediate unexported + // fields of a struct, including anonymous fields of unexported types. + // In particular, unexported fields within the struct's exported fields + // of struct types, including anonymous fields, will not be ignored unless the +@@ -115,7 +115,7 @@ func (tf ifaceFilter) filter(p cmp.Path) bool { + // + // Avoid ignoring unexported fields of a type which you do not control (i.e. a + // type from another repository), as changes to the implementation of such types +-// may change how the comparison behaves. Prefer a custom Comparer instead. ++// may change how the comparison behaves. Prefer a custom [cmp.Comparer] instead. + func IgnoreUnexported(typs ...interface{}) cmp.Option { + ux := newUnexportedFilter(typs...) 
+ return cmp.FilterPath(ux.filter, cmp.Ignore()) +@@ -148,7 +148,7 @@ func isExported(id string) bool { + return unicode.IsUpper(r) + } + +-// IgnoreSliceElements returns an Option that ignores elements of []V. ++// IgnoreSliceElements returns an [cmp.Option] that ignores elements of []V. + // The discard function must be of the form "func(T) bool" which is used to + // ignore slice elements of type V, where V is assignable to T. + // Elements are ignored if the function reports true. +@@ -176,7 +176,7 @@ func IgnoreSliceElements(discardFunc interface{}) cmp.Option { + }, cmp.Ignore()) + } + +-// IgnoreMapEntries returns an Option that ignores entries of map[K]V. ++// IgnoreMapEntries returns an [cmp.Option] that ignores entries of map[K]V. + // The discard function must be of the form "func(T, R) bool" which is used to + // ignore map entries of type K and V, where K and V are assignable to T and R. + // Entries are ignored if the function reports true. +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +index 0eb2a758c23..c6d09dae402 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +@@ -13,7 +13,7 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// SortSlices returns a Transformer option that sorts all []V. ++// SortSlices returns a [cmp.Transformer] option that sorts all []V. + // The less function must be of the form "func(T, T) bool" which is used to + // sort any slice with element type V that is assignable to T. + // +@@ -25,7 +25,7 @@ import ( + // The less function does not have to be "total". That is, if !less(x, y) and + // !less(y, x) for two elements x and y, their relative order is maintained. + // +-// SortSlices can be used in conjunction with EquateEmpty. ++// SortSlices can be used in conjunction with [EquateEmpty]. + func SortSlices(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { +@@ -82,13 +82,13 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + } + +-// SortMaps returns a Transformer option that flattens map[K]V types to be a ++// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be a + // sorted []struct{K, V}. The less function must be of the form + // "func(T, T) bool" which is used to sort any map with key K that is + // assignable to T. + // +-// Flattening the map into a slice has the property that cmp.Equal is able to +-// use Comparers on K or the K.Equal method if it exists. ++// Flattening the map into a slice has the property that [cmp.Equal] is able to ++// use [cmp.Comparer] options on K or the K.Equal method if it exists. + // + // The less function must be: + // - Deterministic: less(x, y) == less(x, y) +@@ -96,7 +96,7 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) + // - Total: if x != y, then either less(x, y) or less(y, x) + // +-// SortMaps can be used in conjunction with EquateEmpty. ++// SortMaps can be used in conjunction with [EquateEmpty]. 
+ func SortMaps(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +index 8812443a2f5..25b4bd05bd7 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +@@ -19,7 +19,7 @@ func (xf xformFilter) filter(p cmp.Path) bool { + return true + } + +-// AcyclicTransformer returns a Transformer with a filter applied that ensures ++// AcyclicTransformer returns a [cmp.Transformer] with a filter applied that ensures + // that the transformer cannot be recursively applied upon its own output. + // + // An example use case is a transformer that splits a string by lines: +@@ -28,7 +28,7 @@ func (xf xformFilter) filter(p cmp.Path) bool { + // return strings.Split(s, "\n") + // }) + // +-// Had this been an unfiltered Transformer instead, this would result in an ++// Had this been an unfiltered [cmp.Transformer] instead, this would result in an + // infinite cycle converting a string to []string to [][]string and so on. + func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { + xf := xformFilter{cmp.Transformer(name, xformFunc)} +diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go +index 087320da7f0..0f5b8a48c6b 100644 +--- a/vendor/github.com/google/go-cmp/cmp/compare.go ++++ b/vendor/github.com/google/go-cmp/cmp/compare.go +@@ -5,7 +5,7 @@ + // Package cmp determines equality of values. + // + // This package is intended to be a more powerful and safer alternative to +-// reflect.DeepEqual for comparing whether two values are semantically equal. ++// [reflect.DeepEqual] for comparing whether two values are semantically equal. + // It is intended to only be used in tests, as performance is not a goal and + // it may panic if it cannot compare the values. Its propensity towards + // panicking means that its unsuitable for production environments where a +@@ -18,16 +18,17 @@ + // For example, an equality function may report floats as equal so long as + // they are within some tolerance of each other. + // +-// - Types with an Equal method may use that method to determine equality. +-// This allows package authors to determine the equality operation +-// for the types that they define. ++// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method ++// to determine equality. This allows package authors to determine ++// the equality operation for the types that they define. + // + // - If no custom equality functions are used and no Equal method is defined, + // equality is determined by recursively comparing the primitive kinds on +-// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, ++// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], + // unexported fields are not compared by default; they result in panics +-// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +-// or explicitly compared using the Exporter option. ++// unless suppressed by using an [Ignore] option ++// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ++// or explicitly compared using the [Exporter] option. 
+ package cmp + + import ( +@@ -45,14 +46,14 @@ import ( + // Equal reports whether x and y are equal by recursively applying the + // following rules in the given order to x and y and all of their sub-values: + // +-// - Let S be the set of all Ignore, Transformer, and Comparer options that ++// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that + // remain after applying all path filters, value filters, and type filters. +-// If at least one Ignore exists in S, then the comparison is ignored. +-// If the number of Transformer and Comparer options in S is non-zero, ++// If at least one [Ignore] exists in S, then the comparison is ignored. ++// If the number of [Transformer] and [Comparer] options in S is non-zero, + // then Equal panics because it is ambiguous which option to use. +-// If S contains a single Transformer, then use that to transform ++// If S contains a single [Transformer], then use that to transform + // the current values and recursively call Equal on the output values. +-// If S contains a single Comparer, then use that to compare the current values. ++// If S contains a single [Comparer], then use that to compare the current values. + // Otherwise, evaluation proceeds to the next rule. + // + // - If the values have an Equal method of the form "(T) Equal(T) bool" or +@@ -66,21 +67,22 @@ import ( + // Functions are only equal if they are both nil, otherwise they are unequal. + // + // Structs are equal if recursively calling Equal on all fields report equal. +-// If a struct contains unexported fields, Equal panics unless an Ignore option +-// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +-// explicitly permits comparing the unexported field. ++// If a struct contains unexported fields, Equal panics unless an [Ignore] option ++// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field ++// or the [Exporter] option explicitly permits comparing the unexported field. + // + // Slices are equal if they are both nil or both non-nil, where recursively + // calling Equal on all non-ignored slice or array elements report equal. + // Empty non-nil slices and nil slices are not equal; to equate empty slices, +-// consider using cmpopts.EquateEmpty. ++// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. + // + // Maps are equal if they are both nil or both non-nil, where recursively + // calling Equal on all non-ignored map entries report equal. + // Map keys are equal according to the == operator. +-// To use custom comparisons for map keys, consider using cmpopts.SortMaps. ++// To use custom comparisons for map keys, consider using ++// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. + // Empty non-nil maps and nil maps are not equal; to equate empty maps, +-// consider using cmpopts.EquateEmpty. ++// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. 
+ // + // Pointers and interfaces are equal if they are both nil or both non-nil, + // where they have the same underlying concrete type and recursively +diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export.go +similarity index 94% +rename from vendor/github.com/google/go-cmp/cmp/export_unsafe.go +rename to vendor/github.com/google/go-cmp/cmp/export.go +index e2c0f74e839..29f82fe6b2f 100644 +--- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go ++++ b/vendor/github.com/google/go-cmp/cmp/export.go +@@ -2,9 +2,6 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !purego +-// +build !purego +- + package cmp + + import ( +@@ -12,8 +9,6 @@ import ( + "unsafe" + ) + +-const supportExporters = true +- + // retrieveUnexportedField uses unsafe to forcibly retrieve any field from + // a struct such that the value has read-write permissions. + // +diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go +deleted file mode 100644 +index ae851fe53f2..00000000000 +--- a/vendor/github.com/google/go-cmp/cmp/export_panic.go ++++ /dev/null +@@ -1,16 +0,0 @@ +-// Copyright 2017, The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build purego +-// +build purego +- +-package cmp +- +-import "reflect" +- +-const supportExporters = false +- +-func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { +- panic("no support for forcibly accessing unexported fields") +-} +diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +similarity index 95% +rename from vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +rename to vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +index 16e6860af6e..e5dfff69afa 100644 +--- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go ++++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +@@ -2,9 +2,6 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !purego +-// +build !purego +- + package value + + import ( +diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +deleted file mode 100644 +index 1a71bfcbd39..00000000000 +--- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go ++++ /dev/null +@@ -1,34 +0,0 @@ +-// Copyright 2018, The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build purego +-// +build purego +- +-package value +- +-import "reflect" +- +-// Pointer is an opaque typed pointer and is guaranteed to be comparable. +-type Pointer struct { +- p uintptr +- t reflect.Type +-} +- +-// PointerOf returns a Pointer from v, which must be a +-// reflect.Ptr, reflect.Slice, or reflect.Map. +-func PointerOf(v reflect.Value) Pointer { +- // NOTE: Storing a pointer as an uintptr is technically incorrect as it +- // assumes that the GC implementation does not use a moving collector. +- return Pointer{v.Pointer(), v.Type()} +-} +- +-// IsNil reports whether the pointer is nil. 
+-func (p Pointer) IsNil() bool { +- return p.p == 0 +-} +- +-// Uintptr returns the pointer as a uintptr. +-func (p Pointer) Uintptr() uintptr { +- return p.p +-} +diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go +index 1f9ca9c4892..754496f3b3f 100644 +--- a/vendor/github.com/google/go-cmp/cmp/options.go ++++ b/vendor/github.com/google/go-cmp/cmp/options.go +@@ -13,15 +13,15 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// Option configures for specific behavior of Equal and Diff. In particular, +-// the fundamental Option functions (Ignore, Transformer, and Comparer), ++// Option configures for specific behavior of [Equal] and [Diff]. In particular, ++// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), + // configure how equality is determined. + // +-// The fundamental options may be composed with filters (FilterPath and +-// FilterValues) to control the scope over which they are applied. ++// The fundamental options may be composed with filters ([FilterPath] and ++// [FilterValues]) to control the scope over which they are applied. + // +-// The cmp/cmpopts package provides helper functions for creating options that +-// may be used with Equal and Diff. ++// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions ++// for creating options that may be used with [Equal] and [Diff]. + type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. +@@ -56,9 +56,9 @@ type core struct{} + + func (core) isCore() {} + +-// Options is a list of Option values that also satisfies the Option interface. ++// Options is a list of [Option] values that also satisfies the [Option] interface. + // Helper comparison packages may return an Options value when packing multiple +-// Option values into a single Option. When this package processes an Options, ++// [Option] values into a single [Option]. When this package processes an Options, + // it will be implicitly expanded into a flat list. + // + // Applying a filter on an Options is equivalent to applying that same filter +@@ -105,16 +105,16 @@ func (opts Options) String() string { + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) + } + +-// FilterPath returns a new Option where opt is only evaluated if filter f +-// returns true for the current Path in the value tree. ++// FilterPath returns a new [Option] where opt is only evaluated if filter f ++// returns true for the current [Path] in the value tree. + // + // This filter is called even if a slice element or map entry is missing and + // provides an opportunity to ignore such cases. The filter function must be + // symmetric such that the filter result is identical regardless of whether the + // missing value is from x or y. + // +-// The option passed in may be an Ignore, Transformer, Comparer, Options, or +-// a previously filtered Option. ++// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or ++// a previously filtered [Option]. 
+ func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") +@@ -142,7 +142,7 @@ func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) + } + +-// FilterValues returns a new Option where opt is only evaluated if filter f, ++// FilterValues returns a new [Option] where opt is only evaluated if filter f, + // which is a function of the form "func(T, T) bool", returns true for the + // current pair of values being compared. If either value is invalid or + // the type of the values is not assignable to T, then this filter implicitly +@@ -154,8 +154,8 @@ func (f pathFilter) String() string { + // If T is an interface, it is possible that f is called with two values with + // different concrete types that both implement T. + // +-// The option passed in may be an Ignore, Transformer, Comparer, Options, or +-// a previously filtered Option. ++// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or ++// a previously filtered [Option]. + func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { +@@ -192,9 +192,9 @@ func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) + } + +-// Ignore is an Option that causes all comparisons to be ignored. +-// This value is intended to be combined with FilterPath or FilterValues. +-// It is an error to pass an unfiltered Ignore option to Equal. ++// Ignore is an [Option] that causes all comparisons to be ignored. ++// This value is intended to be combined with [FilterPath] or [FilterValues]. ++// It is an error to pass an unfiltered Ignore option to [Equal]. + func Ignore() Option { return ignore{} } + + type ignore struct{ core } +@@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" ++ } else if t.Comparable() { ++ help = "consider using cmpopts.EquateComparable to compare comparable Go types" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. +@@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` + + var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +-// Transformer returns an Option that applies a transformation function that ++// Transformer returns an [Option] that applies a transformation function that + // converts values of a certain type into that of another. + // + // The transformer f must be a function "func(T) R" that converts values of +@@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + // same transform to the output of itself (e.g., in the case where the + // input and output types are the same), an implicit filter is added such that + // a transformer is applicable only if that exact transformer is not already +-// in the tail of the Path since the last non-Transform step. ++// in the tail of the [Path] since the last non-[Transform] step. + // For situations where the implicit filter is still insufficient, +-// consider using cmpopts.AcyclicTransformer, which adds a filter +-// to prevent the transformer from being recursively applied upon itself. 
++// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], ++// which adds a filter to prevent the transformer from ++// being recursively applied upon itself. + // +-// The name is a user provided label that is used as the Transform.Name in the +-// transformation PathStep (and eventually shown in the Diff output). ++// The name is a user provided label that is used as the [Transform.Name] in the ++// transformation [PathStep] (and eventually shown in the [Diff] output). + // The name must be a valid identifier or qualified identifier in Go syntax. + // If empty, an arbitrary name is used. + func Transformer(name string, f interface{}) Option { +@@ -329,7 +332,7 @@ func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) + } + +-// Comparer returns an Option that determines whether two values are equal ++// Comparer returns an [Option] that determines whether two values are equal + // to each other. + // + // The comparer f must be a function "func(T, T) bool" and is implicitly +@@ -377,35 +380,32 @@ func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) + } + +-// Exporter returns an Option that specifies whether Equal is allowed to ++// Exporter returns an [Option] that specifies whether [Equal] is allowed to + // introspect into the unexported fields of certain struct types. + // + // Users of this option must understand that comparing on unexported fields + // from external packages is not safe since changes in the internal +-// implementation of some external package may cause the result of Equal ++// implementation of some external package may cause the result of [Equal] + // to unexpectedly change. However, it may be valid to use this option on types + // defined in an internal package where the semantic meaning of an unexported + // field is in the control of the user. + // +-// In many cases, a custom Comparer should be used instead that defines ++// In many cases, a custom [Comparer] should be used instead that defines + // equality as a function of the public API of a type rather than the underlying + // unexported implementation. + // +-// For example, the reflect.Type documentation defines equality to be determined ++// For example, the [reflect.Type] documentation defines equality to be determined + // by the == operator on the interface (essentially performing a shallow pointer +-// comparison) and most attempts to compare *regexp.Regexp types are interested ++// comparison) and most attempts to compare *[regexp.Regexp] types are interested + // in only checking that the regular expression strings are equal. +-// Both of these are accomplished using Comparers: ++// Both of these are accomplished using [Comparer] options: + // + // Comparer(func(x, y reflect.Type) bool { return x == y }) + // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) + // +-// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +-// all unexported fields on specified struct types. ++// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] ++// option can be used to ignore all unexported fields on specified struct types. 
+ func Exporter(f func(reflect.Type) bool) Option { +- if !supportExporters { +- panic("Exporter is not supported on purego builds") +- } + return exporter(f) + } + +@@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO + panic("not implemented") + } + +-// AllowUnexported returns an Options that allows Equal to forcibly introspect ++// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect + // unexported fields of the specified struct types. + // +-// See Exporter for the proper use of this option. ++// See [Exporter] for the proper use of this option. + func AllowUnexported(types ...interface{}) Option { + m := make(map[reflect.Type]bool) + for _, typ := range types { +@@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { + } + + // Result represents the comparison result for a single node and +-// is provided by cmp when calling Report (see Reporter). ++// is provided by cmp when calling Report (see [Reporter]). + type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +@@ -445,7 +445,7 @@ func (r Result) Equal() bool { + } + + // ByIgnore reports whether the node is equal because it was ignored. +-// This never reports true if Equal reports false. ++// This never reports true if [Result.Equal] reports false. + func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 + } +@@ -455,7 +455,7 @@ func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 + } + +-// ByFunc reports whether a Comparer function determined equality. ++// ByFunc reports whether a [Comparer] function determined equality. + func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 + } +@@ -478,7 +478,7 @@ const ( + reportByCycle + ) + +-// Reporter is an Option that can be passed to Equal. When Equal traverses ++// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses + // the value trees, it calls PushStep as it descends into each node in the + // tree and PopStep as it ascend out of the node. The leaves of the tree are + // either compared (determined to be equal or not equal) or ignored and reported +diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go +index a0a588502ed..c3c1456423c 100644 +--- a/vendor/github.com/google/go-cmp/cmp/path.go ++++ b/vendor/github.com/google/go-cmp/cmp/path.go +@@ -14,9 +14,9 @@ import ( + "github.com/google/go-cmp/cmp/internal/value" + ) + +-// Path is a list of PathSteps describing the sequence of operations to get ++// Path is a list of [PathStep] describing the sequence of operations to get + // from some root type to the current position in the value tree. +-// The first Path element is always an operation-less PathStep that exists ++// The first Path element is always an operation-less [PathStep] that exists + // simply to identify the initial type. + // + // When traversing structs with embedded structs, the embedded struct will +@@ -29,8 +29,13 @@ type Path []PathStep + // a value's tree structure. Users of this package never need to implement + // these types as values of this type will be returned by this package. + // +-// Implementations of this interface are +-// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. 
++// Implementations of this interface: ++// - [StructField] ++// - [SliceIndex] ++// - [MapIndex] ++// - [Indirect] ++// - [TypeAssertion] ++// - [Transform] + type PathStep interface { + String() string + +@@ -70,8 +75,9 @@ func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] + } + +-// Last returns the last PathStep in the Path. +-// If the path is empty, this returns a non-nil PathStep that reports a nil Type. ++// Last returns the last [PathStep] in the Path. ++// If the path is empty, this returns a non-nil [PathStep] ++// that reports a nil [PathStep.Type]. + func (pa Path) Last() PathStep { + return pa.Index(-1) + } +@@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { + // Index returns the ith step in the Path and supports negative indexing. + // A negative index starts counting from the tail of the Path such that -1 + // refers to the last step, -2 refers to the second-to-last step, and so on. +-// If index is invalid, this returns a non-nil PathStep that reports a nil Type. ++// If index is invalid, this returns a non-nil [PathStep] ++// that reports a nil [PathStep.Type]. + func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i +@@ -168,7 +175,8 @@ func (ps pathStep) String() string { + return fmt.Sprintf("{%s}", s) + } + +-// StructField represents a struct field access on a field called Name. ++// StructField is a [PathStep] that represents a struct field access ++// on a field called [StructField.Name]. + type StructField struct{ *structField } + type structField struct { + pathStep +@@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + func (sf StructField) Name() string { return sf.name } + + // Index is the index of the field in the parent struct type. +-// See reflect.Type.Field. ++// See [reflect.Type.Field]. + func (sf StructField) Index() int { return sf.idx } + +-// SliceIndex is an index operation on a slice or array at some index Key. ++// SliceIndex is a [PathStep] that represents an index operation on ++// a slice or array at some index [SliceIndex.Key]. + type SliceIndex struct{ *sliceIndex } + type sliceIndex struct { + pathStep +@@ -247,12 +256,12 @@ func (si SliceIndex) Key() int { + // all of the indexes to be shifted. If an index is -1, then that + // indicates that the element does not exist in the associated slice. + // +-// Key is guaranteed to return -1 if and only if the indexes returned +-// by SplitKeys are not the same. SplitKeys will never return -1 for ++// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes ++// returned by SplitKeys are not the same. SplitKeys will never return -1 for + // both indexes. + func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +-// MapIndex is an index operation on a map at some index Key. ++// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. + type MapIndex struct{ *mapIndex } + type mapIndex struct { + pathStep +@@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", + // Key is the value of the map key. + func (mi MapIndex) Key() reflect.Value { return mi.key } + +-// Indirect represents pointer indirection on the parent type. ++// Indirect is a [PathStep] that represents pointer indirection on the parent type. 
+ type Indirect struct{ *indirect } + type indirect struct { + pathStep +@@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } + func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } + func (in Indirect) String() string { return "*" } + +-// TypeAssertion represents a type assertion on an interface. ++// TypeAssertion is a [PathStep] that represents a type assertion on an interface. + type TypeAssertion struct{ *typeAssertion } + type typeAssertion struct { + pathStep +@@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ } + func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } + func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } + +-// Transform is a transformation from the parent type to the current type. ++// Transform is a [PathStep] that represents a transformation ++// from the parent type to the current type. + type Transform struct{ *transform } + type transform struct { + pathStep +@@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } + func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } + func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +-// Name is the name of the Transformer. ++// Name is the name of the [Transformer]. + func (tf Transform) Name() string { return tf.trans.name } + + // Func is the function pointer to the transformer function. + func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +-// Option returns the originally constructed Transformer option. ++// Option returns the originally constructed [Transformer] option. + // The == operator can be used to detect the exact option used. 
+ func (tf Transform) Option() Option { return tf.trans } + +diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go +index 2ab41fad3fb..e39f42284ee 100644 +--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go ++++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go +@@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, + break + } + sf := t.Field(i) +- if supportExporters && !isExported(sf.Name) { ++ if !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) +diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore +new file mode 100644 +index 00000000000..01764d1cdf2 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/.gitignore +@@ -0,0 +1,6 @@ ++# Ignore binaries without extension ++//example/client/client ++//example/server/server ++//internal/v2/fakes2av2_server/fakes2av2_server ++ ++.idea/ +\ No newline at end of file +diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +new file mode 100644 +index 00000000000..dc079b4d66e +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +@@ -0,0 +1,93 @@ ++# Code of Conduct ++ ++## Our Pledge ++ ++In the interest of fostering an open and welcoming environment, we as ++contributors and maintainers pledge to making participation in our project and ++our community a harassment-free experience for everyone, regardless of age, body ++size, disability, ethnicity, gender identity and expression, level of ++experience, education, socio-economic status, nationality, personal appearance, ++race, religion, or sexual identity and orientation. ++ ++## Our Standards ++ ++Examples of behavior that contributes to creating a positive environment ++include: ++ ++* Using welcoming and inclusive language ++* Being respectful of differing viewpoints and experiences ++* Gracefully accepting constructive criticism ++* Focusing on what is best for the community ++* Showing empathy towards other community members ++ ++Examples of unacceptable behavior by participants include: ++ ++* The use of sexualized language or imagery and unwelcome sexual attention or ++ advances ++* Trolling, insulting/derogatory comments, and personal or political attacks ++* Public or private harassment ++* Publishing others' private information, such as a physical or electronic ++ address, without explicit permission ++* Other conduct which could reasonably be considered inappropriate in a ++ professional setting ++ ++## Our Responsibilities ++ ++Project maintainers are responsible for clarifying the standards of acceptable ++behavior and are expected to take appropriate and fair corrective action in ++response to any instances of unacceptable behavior. ++ ++Project maintainers have the right and responsibility to remove, edit, or reject ++comments, commits, code, wiki edits, issues, and other contributions that are ++not aligned to this Code of Conduct, or to ban temporarily or permanently any ++contributor for other behaviors that they deem inappropriate, threatening, ++offensive, or harmful. ++ ++## Scope ++ ++This Code of Conduct applies both within project spaces and in public spaces ++when an individual is representing the project or its community. 
Examples of ++representing a project or community include using an official project e-mail ++address, posting via an official social media account, or acting as an appointed ++representative at an online or offline event. Representation of a project may be ++further defined and clarified by project maintainers. ++ ++This Code of Conduct also applies outside the project spaces when the Project ++Steward has a reasonable belief that an individual's behavior may have a ++negative impact on the project or its community. ++ ++## Conflict Resolution ++ ++We do not believe that all conflict is bad; healthy debate and disagreement ++often yield positive results. However, it is never okay to be disrespectful or ++to engage in behavior that violates the project’s code of conduct. ++ ++If you see someone violating the code of conduct, you are encouraged to address ++the behavior directly with those involved. Many issues can be resolved quickly ++and easily, and this gives people more control over the outcome of their ++dispute. If you are unable to resolve the matter for any reason, or if the ++behavior is threatening or harassing, report it. We are dedicated to providing ++an environment where participants feel welcome and safe. ++ ++Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the ++Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to ++receive and address reported violations of the code of conduct. They will then ++work with a committee consisting of representatives from the Open Source ++Programs Office and the Google Open Source Strategy team. If for any reason you ++are uncomfortable reaching out to the Project Steward, please email ++opensource@google.com. ++ ++We will investigate every complaint, but you may not receive a direct response. ++We will use our discretion in determining when and how to follow up on reported ++incidents, which may range from not taking action to permanent expulsion from ++the project and project-sponsored spaces. We will notify the accused of the ++report and provide them an opportunity to discuss it before any action is taken. ++The identity of the reporter will be omitted from the details of the report ++supplied to the accused. In potentially harmful situations, such as ongoing ++harassment or threats to anyone's safety, we may take action without notice. ++ ++## Attribution ++ ++This Code of Conduct is adapted from the Contributor Covenant, version 1.4, ++available at ++https://www.contributor-covenant.org/version/1/4/code-of-conduct.html +diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md +new file mode 100644 +index 00000000000..22b241cb732 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/CONTRIBUTING.md +@@ -0,0 +1,29 @@ ++# How to Contribute ++ ++We'd love to accept your patches and contributions to this project. There are ++just a few small guidelines you need to follow. ++ ++## Contributor License Agreement ++ ++Contributions to this project must be accompanied by a Contributor License ++Agreement (CLA). You (or your employer) retain the copyright to your ++contribution; this simply gives us permission to use and redistribute your ++contributions as part of the project. Head over to ++ to see your current agreements on file or ++to sign a new one. ++ ++You generally only need to submit a CLA once, so if you've already submitted one ++(even if it was for a different project), you probably don't need to do it ++again. 
++ ++## Code reviews ++ ++All submissions, including submissions by project members, require review. We ++use GitHub pull requests for this purpose. Consult ++[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more ++information on using pull requests. ++ ++## Community Guidelines ++ ++This project follows ++[Google's Open Source Community Guidelines](https://opensource.google/conduct/). +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/github.com/google/s2a-go/LICENSE.md +similarity index 99% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE +rename to vendor/github.com/google/s2a-go/LICENSE.md +index 261eeb9e9f8..d6456956733 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE ++++ b/vendor/github.com/google/s2a-go/LICENSE.md +@@ -1,3 +1,4 @@ ++ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ +diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md +new file mode 100644 +index 00000000000..d566950f385 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/README.md +@@ -0,0 +1,17 @@ ++# Secure Session Agent Client Libraries ++ ++The Secure Session Agent is a service that enables a workload to offload select ++operations from the mTLS handshake and protects a workload's private key ++material from exfiltration. Specifically, the workload asks the Secure Session ++Agent for the TLS configuration to use during the handshake, to perform private ++key operations, and to validate the peer certificate chain. The Secure Session ++Agent's client libraries enable applications to communicate with the Secure ++Session Agent during the TLS handshake, and to encrypt traffic to the peer ++after the TLS handshake is complete. ++ ++This repository contains the source code for the Secure Session Agent's Go ++client libraries, which allow gRPC-Go applications to use the Secure Session ++Agent. This repository supports the Bazel and Golang build systems. ++ ++All code in this repository is experimental and subject to change. We do not ++guarantee API stability at this time. +diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +new file mode 100644 +index 00000000000..034d1b912ca +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +@@ -0,0 +1,167 @@ ++/* ++ * ++ * Copyright 2023 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package fallback provides default implementations of fallback options when S2A fails. 
++package fallback ++ ++import ( ++ "context" ++ "crypto/tls" ++ "fmt" ++ "net" ++ ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++) ++ ++const ( ++ alpnProtoStrH2 = "h2" ++ alpnProtoStrHTTP = "http/1.1" ++ defaultHTTPSPort = "443" ++) ++ ++// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. ++// It supports GRPC use case, thus the alpn is set to 'h2'. ++var FallbackTLSConfigGRPC = tls.Config{ ++ MinVersion: tls.VersionTLS13, ++ ClientSessionCache: nil, ++ NextProtos: []string{alpnProtoStrH2}, ++} ++ ++// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. ++// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. ++var FallbackTLSConfigHTTP = tls.Config{ ++ MinVersion: tls.VersionTLS13, ++ ClientSessionCache: nil, ++ NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, ++} ++ ++// ClientHandshake establishes a TLS connection and returns it, plus its auth info. ++// Inputs: ++// ++// targetServer: the server attempted with S2A. ++// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. ++// If fallback is successful, the `conn` should be closed. ++// err: the error encountered when performing the client-side TLS handshake with S2A. ++type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) ++ ++// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, ++// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. ++// Example use: ++// ++// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, ++// FallbackOpts: &s2a.FallbackOptions{ // optional ++// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), ++// }, ++// }) ++// ++// The fallback server's certificate must be verifiable using OS root store. ++// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, ++// it uses default port 443. ++// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, ++// and min TLS version is set to 1.3. 
++func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { ++ var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} ++ return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) ++} ++ ++func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { ++ fallbackServerAddr, err := processFallbackAddr(fallbackAddr) ++ if err != nil { ++ if grpclog.V(1) { ++ grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) ++ } ++ return nil, err ++ } ++ return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { ++ fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) ++ if fbErr != nil { ++ grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) ++ return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) ++ } ++ ++ tc, success := fbConn.(*tls.Conn) ++ if !success { ++ grpclog.Infof("the connection with fallback server is expected to be tls but isn't") ++ return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) ++ } ++ ++ tlsInfo := credentials.TLSInfo{ ++ State: tc.ConnectionState(), ++ CommonAuthInfo: credentials.CommonAuthInfo{ ++ SecurityLevel: credentials.PrivacyAndIntegrity, ++ }, ++ } ++ if grpclog.V(1) { ++ grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) ++ grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) ++ grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) ++ } ++ conn.Close() ++ return fbConn, tlsInfo, nil ++ }, nil ++} ++ ++// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. ++// Example use: ++// ++// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) ++// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, // required ++// FallbackOpts: &s2a.FallbackOptions{ ++// FallbackDialer: &s2a.FallbackDialer{ ++// Dialer: fallbackDialer, ++// ServerAddr: fallbackServerAddr, ++// }, ++// }, ++// }) ++// ++// The fallback server's certificate should be verifiable using OS root store. ++// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, ++// it uses default port 443. ++// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, ++// and min TLS version is set to 1.3. 
++func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { ++ fallbackServerAddr, err := processFallbackAddr(fallbackAddr) ++ if err != nil { ++ if grpclog.V(1) { ++ grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) ++ } ++ return nil, "", err ++ } ++ return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil ++} ++ ++func processFallbackAddr(fallbackAddr string) (string, error) { ++ var fallbackServerAddr string ++ var err error ++ ++ if fallbackAddr == "" { ++ return "", fmt.Errorf("empty fallback address") ++ } ++ _, _, err = net.SplitHostPort(fallbackAddr) ++ if err != nil { ++ // fallbackAddr does not have port suffix ++ fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) ++ } else { ++ // FallbackServerAddr already has port suffix ++ fallbackServerAddr = fallbackAddr ++ } ++ return fallbackServerAddr, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +new file mode 100644 +index 00000000000..aa3967f9d1f +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +@@ -0,0 +1,119 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package authinfo provides authentication and authorization information that ++// results from the TLS handshake. ++package authinfo ++ ++import ( ++ "errors" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" ++ grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "google.golang.org/grpc/credentials" ++) ++ ++var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) ++ ++const s2aAuthType = "s2a" ++ ++// S2AAuthInfo exposes authentication and authorization information from the ++// S2A session result to the gRPC stack. ++type S2AAuthInfo struct { ++ s2aContext *contextpb.S2AContext ++ commonAuthInfo credentials.CommonAuthInfo ++} ++ ++// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
++func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { ++ return newS2AAuthInfo(result) ++} ++ ++func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { ++ if result == nil { ++ return nil, errors.New("NewS2aAuthInfo given nil session result") ++ } ++ return &S2AAuthInfo{ ++ s2aContext: &contextpb.S2AContext{ ++ ApplicationProtocol: result.GetApplicationProtocol(), ++ TlsVersion: result.GetState().GetTlsVersion(), ++ Ciphersuite: result.GetState().GetTlsCiphersuite(), ++ PeerIdentity: result.GetPeerIdentity(), ++ LocalIdentity: result.GetLocalIdentity(), ++ PeerCertFingerprint: result.GetPeerCertFingerprint(), ++ LocalCertFingerprint: result.GetLocalCertFingerprint(), ++ IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), ++ }, ++ commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, ++ }, nil ++} ++ ++// AuthType returns the authentication type. ++func (s *S2AAuthInfo) AuthType() string { ++ return s2aAuthType ++} ++ ++// ApplicationProtocol returns the application protocol, e.g. "grpc". ++func (s *S2AAuthInfo) ApplicationProtocol() string { ++ return s.s2aContext.GetApplicationProtocol() ++} ++ ++// TLSVersion returns the TLS version negotiated during the handshake. ++func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { ++ return s.s2aContext.GetTlsVersion() ++} ++ ++// Ciphersuite returns the ciphersuite negotiated during the handshake. ++func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { ++ return s.s2aContext.GetCiphersuite() ++} ++ ++// PeerIdentity returns the authenticated identity of the peer. ++func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { ++ return s.s2aContext.GetPeerIdentity() ++} ++ ++// LocalIdentity returns the local identity of the application used during ++// session setup. ++func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { ++ return s.s2aContext.GetLocalIdentity() ++} ++ ++// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in ++// the S2A handshake. ++func (s *S2AAuthInfo) PeerCertFingerprint() []byte { ++ return s.s2aContext.GetPeerCertFingerprint() ++} ++ ++// LocalCertFingerprint returns the SHA256 hash of the local certificate used ++// in the S2A handshake. ++func (s *S2AAuthInfo) LocalCertFingerprint() []byte { ++ return s.s2aContext.GetLocalCertFingerprint() ++} ++ ++// IsHandshakeResumed returns true if a cached session was used to resume ++// the handshake. ++func (s *S2AAuthInfo) IsHandshakeResumed() bool { ++ return s.s2aContext.GetIsHandshakeResumed() ++} ++ ++// SecurityLevel returns the security level of the connection. ++func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { ++ return s.commonAuthInfo.SecurityLevel ++} +diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +new file mode 100644 +index 00000000000..8297c9a9746 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +@@ -0,0 +1,438 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package handshaker communicates with the S2A handshaker service. ++package handshaker ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "io" ++ "net" ++ "sync" ++ ++ "github.com/google/s2a-go/internal/authinfo" ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "github.com/google/s2a-go/internal/record" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ grpc "google.golang.org/grpc" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++) ++ ++var ( ++ // appProtocol contains the application protocol accepted by the handshaker. ++ appProtocol = "grpc" ++ // frameLimit is the maximum size of a frame in bytes. ++ frameLimit = 1024 * 64 ++ // peerNotRespondingError is the error thrown when the peer doesn't respond. ++ errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") ++) ++ ++// Handshaker defines a handshaker interface. ++type Handshaker interface { ++ // ClientHandshake starts and completes a TLS handshake from the client side, ++ // and returns a secure connection along with additional auth information. ++ ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) ++ // ServerHandshake starts and completes a TLS handshake from the server side, ++ // and returns a secure connection along with additional auth information. ++ ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) ++ // Close terminates the Handshaker. It should be called when the handshake ++ // is complete. ++ Close() error ++} ++ ++// ClientHandshakerOptions contains the options needed to configure the S2A ++// handshaker service on the client-side. ++type ClientHandshakerOptions struct { ++ // MinTLSVersion specifies the min TLS version supported by the client. ++ MinTLSVersion commonpb.TLSVersion ++ // MaxTLSVersion specifies the max TLS version supported by the client. ++ MaxTLSVersion commonpb.TLSVersion ++ // TLSCiphersuites is the ordered list of ciphersuites supported by the ++ // client. ++ TLSCiphersuites []commonpb.Ciphersuite ++ // TargetIdentities contains a list of allowed server identities. One of the ++ // target identities should match the peer identity in the handshake ++ // result; otherwise, the handshake fails. ++ TargetIdentities []*commonpb.Identity ++ // LocalIdentity is the local identity of the client application. If none is ++ // provided, then the S2A will choose the default identity. ++ LocalIdentity *commonpb.Identity ++ // TargetName is the allowed server name, which may be used for server ++ // authorization check by the S2A if it is provided. ++ TargetName string ++ // EnsureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ EnsureProcessSessionTickets *sync.WaitGroup ++} ++ ++// ServerHandshakerOptions contains the options needed to configure the S2A ++// handshaker service on the server-side. 
++type ServerHandshakerOptions struct { ++ // MinTLSVersion specifies the min TLS version supported by the server. ++ MinTLSVersion commonpb.TLSVersion ++ // MaxTLSVersion specifies the max TLS version supported by the server. ++ MaxTLSVersion commonpb.TLSVersion ++ // TLSCiphersuites is the ordered list of ciphersuites supported by the ++ // server. ++ TLSCiphersuites []commonpb.Ciphersuite ++ // LocalIdentities is the list of local identities that may be assumed by ++ // the server. If no local identity is specified, then the S2A chooses a ++ // default local identity. ++ LocalIdentities []*commonpb.Identity ++} ++ ++// s2aHandshaker performs a TLS handshake using the S2A handshaker service. ++type s2aHandshaker struct { ++ // stream is used to communicate with the S2A handshaker service. ++ stream s2apb.S2AService_SetUpSessionClient ++ // conn is the connection to the peer. ++ conn net.Conn ++ // clientOpts should be non-nil iff the handshaker is client-side. ++ clientOpts *ClientHandshakerOptions ++ // serverOpts should be non-nil iff the handshaker is server-side. ++ serverOpts *ServerHandshakerOptions ++ // isClient determines if the handshaker is client or server side. ++ isClient bool ++ // hsAddr stores the address of the S2A handshaker service. ++ hsAddr string ++ // tokenManager manages access tokens for authenticating to S2A. ++ tokenManager tokenmanager.AccessTokenManager ++ // localIdentities is the set of local identities for whom the ++ // tokenManager should fetch a token when preparing a request to be ++ // sent to S2A. ++ localIdentities []*commonpb.Identity ++} ++ ++// NewClientHandshaker creates an s2aHandshaker instance that performs a ++// client-side TLS handshake using the S2A handshaker service. ++func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { ++ stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) ++ if err != nil { ++ return nil, err ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil ++} ++ ++func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { ++ var localIdentities []*commonpb.Identity ++ if opts != nil { ++ localIdentities = []*commonpb.Identity{opts.LocalIdentity} ++ } ++ return &s2aHandshaker{ ++ stream: stream, ++ conn: c, ++ clientOpts: opts, ++ isClient: true, ++ hsAddr: hsAddr, ++ tokenManager: tokenManager, ++ localIdentities: localIdentities, ++ } ++} ++ ++// NewServerHandshaker creates an s2aHandshaker instance that performs a ++// server-side TLS handshake using the S2A handshaker service. 
++func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { ++ stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) ++ if err != nil { ++ return nil, err ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil ++} ++ ++func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { ++ var localIdentities []*commonpb.Identity ++ if opts != nil { ++ localIdentities = opts.LocalIdentities ++ } ++ return &s2aHandshaker{ ++ stream: stream, ++ conn: c, ++ serverOpts: opts, ++ isClient: false, ++ hsAddr: hsAddr, ++ tokenManager: tokenManager, ++ localIdentities: localIdentities, ++ } ++} ++ ++// ClientHandshake performs a client-side TLS handshake using the S2A handshaker ++// service. When complete, returns a TLS connection. ++func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { ++ if !h.isClient { ++ return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") ++ } ++ // Extract the hostname from the target name. The target name is assumed to be an authority. ++ hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) ++ if err != nil { ++ // If the target name had no host port or could not be parsed, use it as is. ++ hostname = h.clientOpts.TargetName ++ } ++ ++ // Prepare a client start message to send to the S2A handshaker service. ++ req := &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ClientStart{ ++ ClientStart: &s2apb.ClientSessionStartReq{ ++ ApplicationProtocols: []string{appProtocol}, ++ MinTlsVersion: h.clientOpts.MinTLSVersion, ++ MaxTlsVersion: h.clientOpts.MaxTLSVersion, ++ TlsCiphersuites: h.clientOpts.TLSCiphersuites, ++ TargetIdentities: h.clientOpts.TargetIdentities, ++ LocalIdentity: h.clientOpts.LocalIdentity, ++ TargetName: hostname, ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ } ++ conn, result, err := h.setUpSession(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ authInfo, err := authinfo.NewS2AAuthInfo(result) ++ if err != nil { ++ return nil, nil, err ++ } ++ return conn, authInfo, nil ++} ++ ++// ServerHandshake performs a server-side TLS handshake using the S2A handshaker ++// service. When complete, returns a TLS connection. ++func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { ++ if h.isClient { ++ return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") ++ } ++ p := make([]byte, frameLimit) ++ n, err := h.conn.Read(p) ++ if err != nil { ++ return nil, nil, err ++ } ++ // Prepare a server start message to send to the S2A handshaker service. 
++ req := &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ServerStart{ ++ ServerStart: &s2apb.ServerSessionStartReq{ ++ ApplicationProtocols: []string{appProtocol}, ++ MinTlsVersion: h.serverOpts.MinTLSVersion, ++ MaxTlsVersion: h.serverOpts.MaxTLSVersion, ++ TlsCiphersuites: h.serverOpts.TLSCiphersuites, ++ LocalIdentities: h.serverOpts.LocalIdentities, ++ InBytes: p[:n], ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ } ++ conn, result, err := h.setUpSession(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ authInfo, err := authinfo.NewS2AAuthInfo(result) ++ if err != nil { ++ return nil, nil, err ++ } ++ return conn, authInfo, nil ++} ++ ++// setUpSession proxies messages between the peer and the S2A handshaker ++// service. ++func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { ++ resp, err := h.accessHandshakerService(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ // Check if the returned status is an error. ++ if resp.GetStatus() != nil { ++ if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { ++ return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) ++ } ++ } ++ // Calculate the extra unread bytes from the Session. Attempting to consume ++ // more than the bytes sent will throw an error. ++ var extra []byte ++ if req.GetServerStart() != nil { ++ if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { ++ return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") ++ } ++ extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] ++ } ++ result, extra, err := h.processUntilDone(resp, extra) ++ if err != nil { ++ return nil, nil, err ++ } ++ if result.GetLocalIdentity() == nil { ++ return nil, nil, errors.New("local identity must be populated in session result") ++ } ++ ++ // Create a new TLS record protocol using the Session Result. ++ newConn, err := record.NewConn(&record.ConnParameters{ ++ NetConn: h.conn, ++ Ciphersuite: result.GetState().GetTlsCiphersuite(), ++ TLSVersion: result.GetState().GetTlsVersion(), ++ InTrafficSecret: result.GetState().GetInKey(), ++ OutTrafficSecret: result.GetState().GetOutKey(), ++ UnusedBuf: extra, ++ InSequence: result.GetState().GetInSequence(), ++ OutSequence: result.GetState().GetOutSequence(), ++ HSAddr: h.hsAddr, ++ ConnectionID: result.GetState().GetConnectionId(), ++ LocalIdentity: result.GetLocalIdentity(), ++ EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), ++ }) ++ if err != nil { ++ return nil, nil, err ++ } ++ return newConn, result, nil ++} ++ ++func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { ++ if h.clientOpts == nil { ++ return nil ++ } ++ return h.clientOpts.EnsureProcessSessionTickets ++} ++ ++// accessHandshakerService sends the session request to the S2A handshaker ++// service and returns the session response. ++func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { ++ if err := h.stream.Send(req); err != nil { ++ return nil, err ++ } ++ resp, err := h.stream.Recv() ++ if err != nil { ++ return nil, err ++ } ++ return resp, nil ++} ++ ++// processUntilDone continues proxying messages between the peer and the S2A ++// handshaker service until the handshaker service returns the SessionResult at ++// the end of the handshake or an error occurs. 
++func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { ++ for { ++ if len(resp.OutFrames) > 0 { ++ if _, err := h.conn.Write(resp.OutFrames); err != nil { ++ return nil, nil, err ++ } ++ } ++ if resp.Result != nil { ++ return resp.Result, unusedBytes, nil ++ } ++ buf := make([]byte, frameLimit) ++ n, err := h.conn.Read(buf) ++ if err != nil && err != io.EOF { ++ return nil, nil, err ++ } ++ // If there is nothing to send to the handshaker service and nothing is ++ // received from the peer, then we are stuck. This covers the case when ++ // the peer is not responding. Note that handshaker service connection ++ // issues are caught in accessHandshakerService before we even get ++ // here. ++ if len(resp.OutFrames) == 0 && n == 0 { ++ return nil, nil, errPeerNotResponding ++ } ++ // Append extra bytes from the previous interaction with the handshaker ++ // service with the current buffer read from conn. ++ p := append(unusedBytes, buf[:n]...) ++ // From here on, p and unusedBytes point to the same slice. ++ resp, err = h.accessHandshakerService(&s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_Next{ ++ Next: &s2apb.SessionNextReq{ ++ InBytes: p, ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ }) ++ if err != nil { ++ return nil, nil, err ++ } ++ ++ // Cache the local identity returned by S2A, if it is populated. This ++ // overwrites any existing local identities. This is done because, once the ++ // S2A has selected a local identity, then only that local identity should ++ // be asserted in future requests until the end of the current handshake. ++ if resp.GetLocalIdentity() != nil { ++ h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} ++ } ++ ++ // Set unusedBytes based on the handshaker service response. ++ if resp.GetBytesConsumed() > uint32(len(p)) { ++ return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") ++ } ++ unusedBytes = p[resp.GetBytesConsumed():] ++ } ++} ++ ++// Close shuts down the handshaker and the stream to the S2A handshaker service ++// when the handshake is complete. It should be called when the caller obtains ++// the secure connection at the end of the handshake. ++func (h *s2aHandshaker) Close() error { ++ return h.stream.CloseSend() ++} ++ ++func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { ++ if h.tokenManager == nil { ++ return nil ++ } ++ // First handle the special case when no local identities have been provided ++ // by the application. In this case, an AuthenticationMechanism with no local ++ // identity will be sent. ++ if len(h.localIdentities) == 0 { ++ token, err := h.tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ ++ // Next, handle the case where the application (or the S2A) has provided ++ // one or more local identities. 
++ var authMechanisms []*s2apb.AuthenticationMechanism ++ for _, localIdentity := range h.localIdentities { ++ token, err := h.tokenManager.Token(localIdentity) ++ if err != nil { ++ grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ ++ authMechanism := &s2apb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ } ++ authMechanisms = append(authMechanisms, authMechanism) ++ } ++ return authMechanisms ++} +diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +new file mode 100644 +index 00000000000..49573af887c +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +@@ -0,0 +1,99 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package service is a utility for calling the S2A handshaker service. ++package service ++ ++import ( ++ "context" ++ "net" ++ "os" ++ "strings" ++ "sync" ++ "time" ++ ++ "google.golang.org/appengine" ++ "google.golang.org/appengine/socket" ++ grpc "google.golang.org/grpc" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. ++const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" ++ ++var ( ++ // appEngineDialerHook is an AppEngine-specific dial option that is set ++ // during init time. If nil, then the application is not running on Google ++ // AppEngine. ++ appEngineDialerHook func(context.Context) grpc.DialOption ++ // mu guards hsConnMap and hsDialer. ++ mu sync.Mutex ++ // hsConnMap represents a mapping from an S2A handshaker service address ++ // to a corresponding connection to an S2A handshaker service instance. ++ hsConnMap = make(map[string]*grpc.ClientConn) ++ // hsDialer will be reassigned in tests. ++ hsDialer = grpc.Dial ++) ++ ++func init() { ++ if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { ++ return ++ } ++ appEngineDialerHook = func(ctx context.Context) grpc.DialOption { ++ return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { ++ return socket.DialTimeout(ctx, "tcp", addr, timeout) ++ }) ++ } ++} ++ ++// Dial dials the S2A handshaker service. If a connection has already been ++// established, this function returns it. Otherwise, a new connection is ++// created. ++func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { ++ mu.Lock() ++ defer mu.Unlock() ++ ++ hsConn, ok := hsConnMap[handshakerServiceAddress] ++ if !ok { ++ // Create a new connection to the S2A handshaker service. Note that ++ // this connection stays open until the application is closed. 
++ grpcOpts := []grpc.DialOption{ ++ grpc.WithInsecure(), ++ } ++ if enableAppEngineDialer() && appEngineDialerHook != nil { ++ if grpclog.V(1) { ++ grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") ++ } ++ grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) ++ } ++ var err error ++ hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) ++ if err != nil { ++ return nil, err ++ } ++ hsConnMap[handshakerServiceAddress] = hsConn ++ } ++ return hsConn, nil ++} ++ ++func enableAppEngineDialer() bool { ++ if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { ++ return true ++ } ++ return false ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +new file mode 100644 +index 00000000000..16278a1d995 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +@@ -0,0 +1,389 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/common/common.proto ++ ++package common_go_proto ++ ++import ( ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++// The ciphersuites supported by S2A. The name determines the confidentiality, ++// and authentication ciphers as well as the hash algorithm used for PRF in ++// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: ++// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. ++// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. ++type Ciphersuite int32 ++ ++const ( ++ Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 ++ Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 ++ Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 ++) ++ ++// Enum value maps for Ciphersuite. 
++var ( ++ Ciphersuite_name = map[int32]string{ ++ 0: "AES_128_GCM_SHA256", ++ 1: "AES_256_GCM_SHA384", ++ 2: "CHACHA20_POLY1305_SHA256", ++ } ++ Ciphersuite_value = map[string]int32{ ++ "AES_128_GCM_SHA256": 0, ++ "AES_256_GCM_SHA384": 1, ++ "CHACHA20_POLY1305_SHA256": 2, ++ } ++) ++ ++func (x Ciphersuite) Enum() *Ciphersuite { ++ p := new(Ciphersuite) ++ *p = x ++ return p ++} ++ ++func (x Ciphersuite) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() ++} ++ ++func (Ciphersuite) Type() protoreflect.EnumType { ++ return &file_internal_proto_common_common_proto_enumTypes[0] ++} ++ ++func (x Ciphersuite) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use Ciphersuite.Descriptor instead. ++func (Ciphersuite) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++// The TLS versions supported by S2A's handshaker module. ++type TLSVersion int32 ++ ++const ( ++ TLSVersion_TLS1_2 TLSVersion = 0 ++ TLSVersion_TLS1_3 TLSVersion = 1 ++) ++ ++// Enum value maps for TLSVersion. ++var ( ++ TLSVersion_name = map[int32]string{ ++ 0: "TLS1_2", ++ 1: "TLS1_3", ++ } ++ TLSVersion_value = map[string]int32{ ++ "TLS1_2": 0, ++ "TLS1_3": 1, ++ } ++) ++ ++func (x TLSVersion) Enum() *TLSVersion { ++ p := new(TLSVersion) ++ *p = x ++ return p ++} ++ ++func (x TLSVersion) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() ++} ++ ++func (TLSVersion) Type() protoreflect.EnumType { ++ return &file_internal_proto_common_common_proto_enumTypes[1] ++} ++ ++func (x TLSVersion) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use TLSVersion.Descriptor instead. ++func (TLSVersion) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} ++} ++ ++type Identity struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to IdentityOneof: ++ // ++ // *Identity_SpiffeId ++ // *Identity_Hostname ++ // *Identity_Uid ++ // *Identity_MdbUsername ++ // *Identity_GaiaId ++ IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` ++ // Additional identity-specific attributes. 
++ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` ++} ++ ++func (x *Identity) Reset() { ++ *x = Identity{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_common_common_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *Identity) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*Identity) ProtoMessage() {} ++ ++func (x *Identity) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_common_common_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use Identity.ProtoReflect.Descriptor instead. ++func (*Identity) Descriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { ++ if m != nil { ++ return m.IdentityOneof ++ } ++ return nil ++} ++ ++func (x *Identity) GetSpiffeId() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { ++ return x.SpiffeId ++ } ++ return "" ++} ++ ++func (x *Identity) GetHostname() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { ++ return x.Hostname ++ } ++ return "" ++} ++ ++func (x *Identity) GetUid() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { ++ return x.Uid ++ } ++ return "" ++} ++ ++func (x *Identity) GetMdbUsername() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { ++ return x.MdbUsername ++ } ++ return "" ++} ++ ++func (x *Identity) GetGaiaId() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { ++ return x.GaiaId ++ } ++ return "" ++} ++ ++func (x *Identity) GetAttributes() map[string]string { ++ if x != nil { ++ return x.Attributes ++ } ++ return nil ++} ++ ++type isIdentity_IdentityOneof interface { ++ isIdentity_IdentityOneof() ++} ++ ++type Identity_SpiffeId struct { ++ // The SPIFFE ID of a connection endpoint. ++ SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` ++} ++ ++type Identity_Hostname struct { ++ // The hostname of a connection endpoint. ++ Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` ++} ++ ++type Identity_Uid struct { ++ // The UID of a connection endpoint. ++ Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` ++} ++ ++type Identity_MdbUsername struct { ++ // The MDB username of a connection endpoint. ++ MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` ++} ++ ++type Identity_GaiaId struct { ++ // The Gaia ID of a connection endpoint. 
++ GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` ++} ++ ++func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} ++ ++func (*Identity_Hostname) isIdentity_IdentityOneof() {} ++ ++func (*Identity_Uid) isIdentity_IdentityOneof() {} ++ ++func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} ++ ++func (*Identity_GaiaId) isIdentity_IdentityOneof() {} ++ ++var File_internal_proto_common_common_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_common_common_proto_rawDesc = []byte{ ++ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, ++ 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, ++ 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, ++ 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, ++ 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, ++ 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, ++ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, ++ 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, ++ 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, ++ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, ++ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, ++ 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, ++ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, ++ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, ++ 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, ++ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, ++ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, ++ 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, ++ 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, ++ 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, ++ 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, ++ 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, ++ 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, ++ 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, ++ 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, ++ 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 
0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, ++ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, ++ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, ++ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_common_common_proto_rawDescOnce sync.Once ++ file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc ++) ++ ++func file_internal_proto_common_common_proto_rawDescGZIP() []byte { ++ file_internal_proto_common_common_proto_rawDescOnce.Do(func() { ++ file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) ++ }) ++ return file_internal_proto_common_common_proto_rawDescData ++} ++ ++var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) ++var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) ++var file_internal_proto_common_common_proto_goTypes = []interface{}{ ++ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite ++ (TLSVersion)(0), // 1: s2a.proto.TLSVersion ++ (*Identity)(nil), // 2: s2a.proto.Identity ++ nil, // 3: s2a.proto.Identity.AttributesEntry ++} ++var file_internal_proto_common_common_proto_depIdxs = []int32{ ++ 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry ++ 1, // [1:1] is the sub-list for method output_type ++ 1, // [1:1] is the sub-list for method input_type ++ 1, // [1:1] is the sub-list for extension type_name ++ 1, // [1:1] is the sub-list for extension extendee ++ 0, // [0:1] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_common_common_proto_init() } ++func file_internal_proto_common_common_proto_init() { ++ if File_internal_proto_common_common_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*Identity); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ ++ (*Identity_SpiffeId)(nil), ++ (*Identity_Hostname)(nil), ++ (*Identity_Uid)(nil), ++ (*Identity_MdbUsername)(nil), ++ (*Identity_GaiaId)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_common_common_proto_rawDesc, ++ NumEnums: 2, ++ NumMessages: 2, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_common_common_proto_goTypes, ++ DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, ++ EnumInfos: file_internal_proto_common_common_proto_enumTypes, ++ MessageInfos: file_internal_proto_common_common_proto_msgTypes, ++ }.Build() ++ File_internal_proto_common_common_proto = out.File ++ file_internal_proto_common_common_proto_rawDesc = nil ++ file_internal_proto_common_common_proto_goTypes = nil ++ file_internal_proto_common_common_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +new file mode 100644 +index 
00000000000..f4f763ae102 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +@@ -0,0 +1,267 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/s2a_context/s2a_context.proto ++ ++package s2a_context_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type S2AContext struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocol negotiated for this connection, e.g., 'grpc'. ++ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` ++ // The TLS version number that the S2A's handshaker module used to set up the ++ // session. ++ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` ++ // The TLS ciphersuite negotiated by the S2A's handshaker module. ++ Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` ++ // The authenticated identity of the peer. ++ PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the peer certificate used in the handshake. ++ PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` ++ // The SHA256 hash of the local certificate used in the handshake. 
++ LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` ++ // Set to true if a cached session was reused to resume the handshake. ++ IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` ++} ++ ++func (x *S2AContext) Reset() { ++ *x = S2AContext{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *S2AContext) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*S2AContext) ProtoMessage() {} ++ ++func (x *S2AContext) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. ++func (*S2AContext) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *S2AContext) GetApplicationProtocol() string { ++ if x != nil { ++ return x.ApplicationProtocol ++ } ++ return "" ++} ++ ++func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.TlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuite ++ } ++ return common_go_proto.Ciphersuite(0) ++} ++ ++func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.PeerIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetPeerCertFingerprint() []byte { ++ if x != nil { ++ return x.PeerCertFingerprint ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalCertFingerprint ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetIsHandshakeResumed() bool { ++ if x != nil { ++ return x.IsHandshakeResumed ++ } ++ return false ++} ++ ++var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ ++ 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, ++ 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, ++ 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, ++ 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, ++ 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, ++ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, ++ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, ++ 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, ++ 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, ++ 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, ++ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, ++ 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, ++ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, ++ 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, ++ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, ++ 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, ++ 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, ++ 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, ++ 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, ++ 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, ++ 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, ++ 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, ++ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, ++ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc ++) ++ ++func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) ++ }) ++ return file_internal_proto_s2a_context_s2a_context_proto_rawDescData ++} ++ ++var 
file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) ++var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ ++ (*S2AContext)(nil), // 0: s2a.proto.S2AContext ++ (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite ++ (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity ++} ++var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ ++ 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion ++ 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite ++ 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity ++ 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity ++ 4, // [4:4] is the sub-list for method output_type ++ 4, // [4:4] is the sub-list for method input_type ++ 4, // [4:4] is the sub-list for extension type_name ++ 4, // [4:4] is the sub-list for extension extendee ++ 0, // [0:4] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } ++func file_internal_proto_s2a_context_s2a_context_proto_init() { ++ if File_internal_proto_s2a_context_s2a_context_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*S2AContext); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 1, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, ++ DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, ++ MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, ++ }.Build() ++ File_internal_proto_s2a_context_s2a_context_proto = out.File ++ file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil ++ file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil ++ file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +new file mode 100644 +index 00000000000..0a86ebee592 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +@@ -0,0 +1,1377 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. 
++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type AuthenticationMechanism struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // (Optional) Application may specify an identity associated to an ++ // authentication mechanism. Otherwise, S2A assumes that the authentication ++ // mechanism is associated with the default identity. If the default identity ++ // cannot be determined, session setup fails. ++ Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` ++ // Types that are assignable to MechanismOneof: ++ // ++ // *AuthenticationMechanism_Token ++ MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` ++} ++ ++func (x *AuthenticationMechanism) Reset() { ++ *x = AuthenticationMechanism{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AuthenticationMechanism) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AuthenticationMechanism) ProtoMessage() {} ++ ++func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. ++func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.Identity ++ } ++ return nil ++} ++ ++func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { ++ if m != nil { ++ return m.MechanismOneof ++ } ++ return nil ++} ++ ++func (x *AuthenticationMechanism) GetToken() string { ++ if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { ++ return x.Token ++ } ++ return "" ++} ++ ++type isAuthenticationMechanism_MechanismOneof interface { ++ isAuthenticationMechanism_MechanismOneof() ++} ++ ++type AuthenticationMechanism_Token struct { ++ // A token that the application uses to authenticate itself to the S2A. ++ Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` ++} ++ ++func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} ++ ++type ClientSessionStartReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocols supported by the client, e.g., "grpc". 
++ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` ++ // (Optional) The minimum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the minimum version it supports. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` ++ // (Optional) The maximum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the maximum version it supports. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` ++ // The TLS ciphersuites that the client is willing to support. ++ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` ++ // (Optional) Describes which server identities are acceptable by the client. ++ // If target identities are provided and none of them matches the peer ++ // identity of the server, session setup fails. ++ TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` ++ // (Optional) Application may specify a local identity. Otherwise, S2A chooses ++ // the default local identity. If the default identity cannot be determined, ++ // session setup fails. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The target name that is used by S2A to configure SNI in the TLS handshake. ++ // It is also used to perform server authorization check if avaiable. This ++ // check is intended to verify that the peer authenticated identity is ++ // authorized to run a service with the target name. ++ // This field MUST only contain the host portion of the server address. It ++ // MUST not contain the scheme or the port number. For example, if the server ++ // address is dns://www.example.com:443, the value of this field should be ++ // set to www.example.com. ++ TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` ++} ++ ++func (x *ClientSessionStartReq) Reset() { ++ *x = ClientSessionStartReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ClientSessionStartReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ClientSessionStartReq) ProtoMessage() {} ++ ++func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead. 
++func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} ++} ++ ++func (x *ClientSessionStartReq) GetApplicationProtocols() []string { ++ if x != nil { ++ return x.ApplicationProtocols ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuites ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { ++ if x != nil { ++ return x.TargetIdentities ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetTargetName() string { ++ if x != nil { ++ return x.TargetName ++ } ++ return "" ++} ++ ++type ServerSessionStartReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocols supported by the server, e.g., "grpc". ++ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` ++ // (Optional) The minimum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the minimum version it supports. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` ++ // (Optional) The maximum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the maximum version it supports. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` ++ // The TLS ciphersuites that the server is willing to support. ++ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` ++ // (Optional) A list of local identities supported by the server, if ++ // specified. Otherwise, S2A chooses the default local identity. If the ++ // default identity cannot be determined, session setup fails. ++ LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` ++ // The byte representation of the first handshake message received from the ++ // client peer. It is possible that this first message is split into multiple ++ // chunks. In this case, the first chunk is sent using this field and the ++ // following chunks are sent using the in_bytes field of SessionNextReq ++ // Specifically, if the client peer is using S2A, this field contains the ++ // bytes in the out_frames field of SessionResp message that the client peer ++ // received from its S2A after initiating the handshake. 
++ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *ServerSessionStartReq) Reset() { ++ *x = ServerSessionStartReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ServerSessionStartReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ServerSessionStartReq) ProtoMessage() {} ++ ++func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. ++func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} ++} ++ ++func (x *ServerSessionStartReq) GetApplicationProtocols() []string { ++ if x != nil { ++ return x.ApplicationProtocols ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuites ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentities ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type SessionNextReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The byte representation of session setup, i.e., handshake messages. ++ // Specifically: ++ // - All handshake messages sent from the server to the client. ++ // - All, except for the first, handshake messages sent from the client to ++ // the server. Note that the first message is communicated to S2A using the ++ // in_bytes field of ServerSessionStartReq. ++ // ++ // If the peer is using S2A, this field contains the bytes in the out_frames ++ // field of SessionResp message that the peer received from its S2A. 
++ InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *SessionNextReq) Reset() { ++ *x = SessionNextReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionNextReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionNextReq) ProtoMessage() {} ++ ++func (x *SessionNextReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. ++func (*SessionNextReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} ++} ++ ++func (x *SessionNextReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type ResumptionTicketReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The byte representation of a NewSessionTicket message received from the ++ // server. ++ InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++ // A connection identifier that was created and sent by S2A at the end of a ++ // handshake. ++ ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` ++ // The local identity that was used by S2A during session setup and included ++ // in |SessionResult|. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++} ++ ++func (x *ResumptionTicketReq) Reset() { ++ *x = ResumptionTicketReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ResumptionTicketReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ResumptionTicketReq) ProtoMessage() {} ++ ++func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. 
++func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} ++} ++ ++func (x *ResumptionTicketReq) GetInBytes() [][]byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++func (x *ResumptionTicketReq) GetConnectionId() uint64 { ++ if x != nil { ++ return x.ConnectionId ++ } ++ return 0 ++} ++ ++func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++type SessionReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to ReqOneof: ++ // ++ // *SessionReq_ClientStart ++ // *SessionReq_ServerStart ++ // *SessionReq_Next ++ // *SessionReq_ResumptionTicket ++ ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` ++ // (Optional) The authentication mechanisms that the client wishes to use to ++ // authenticate to the S2A, ordered by preference. The S2A will always use the ++ // first authentication mechanism that appears in the list and is supported by ++ // the S2A. ++ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` ++} ++ ++func (x *SessionReq) Reset() { ++ *x = SessionReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionReq) ProtoMessage() {} ++ ++func (x *SessionReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. ++func (*SessionReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} ++} ++ ++func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { ++ if m != nil { ++ return m.ReqOneof ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetClientStart() *ClientSessionStartReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { ++ return x.ClientStart ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetServerStart() *ServerSessionStartReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { ++ return x.ServerStart ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetNext() *SessionNextReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { ++ return x.Next ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { ++ return x.ResumptionTicket ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { ++ if x != nil { ++ return x.AuthMechanisms ++ } ++ return nil ++} ++ ++type isSessionReq_ReqOneof interface { ++ isSessionReq_ReqOneof() ++} ++ ++type SessionReq_ClientStart struct { ++ // The client session setup request message. ++ ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` ++} ++ ++type SessionReq_ServerStart struct { ++ // The server session setup request message. 
++ ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` ++} ++ ++type SessionReq_Next struct { ++ // The next session setup message request message. ++ Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` ++} ++ ++type SessionReq_ResumptionTicket struct { ++ // The resumption ticket that is received from the server. This message is ++ // only accepted by S2A if it is running as a client and if it is received ++ // after session setup is complete. If S2A is running as a server and it ++ // receives this message, the session is terminated. ++ ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` ++} ++ ++func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_Next) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} ++ ++type SessionState struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The TLS version number that the S2A's handshaker module used to set up the ++ // session. ++ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` ++ // The TLS ciphersuite negotiated by the S2A's handshaker module. ++ TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` ++ // The sequence number of the next, incoming, TLS record. ++ InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` ++ // The sequence number of the next, outgoing, TLS record. ++ OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` ++ // The key for the inbound direction. ++ InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` ++ // The key for the outbound direction. ++ OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` ++ // The constant part of the record nonce for the outbound direction. ++ InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` ++ // The constant part of the record nonce for the inbound direction. ++ OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` ++ // A connection identifier that can be provided to S2A to perform operations ++ // related to this connection. This identifier will be stored by the record ++ // protocol, and included in the |ResumptionTicketReq| message that is later ++ // sent back to S2A. This field is set only for client-side connections. ++ ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` ++ // Set to true if a cached session was reused to do an abbreviated handshake. 
++ IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` ++} ++ ++func (x *SessionState) Reset() { ++ *x = SessionState{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionState) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionState) ProtoMessage() {} ++ ++func (x *SessionState) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. ++func (*SessionState) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} ++} ++ ++func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.TlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuite ++ } ++ return common_go_proto.Ciphersuite(0) ++} ++ ++func (x *SessionState) GetInSequence() uint64 { ++ if x != nil { ++ return x.InSequence ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetOutSequence() uint64 { ++ if x != nil { ++ return x.OutSequence ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetInKey() []byte { ++ if x != nil { ++ return x.InKey ++ } ++ return nil ++} ++ ++func (x *SessionState) GetOutKey() []byte { ++ if x != nil { ++ return x.OutKey ++ } ++ return nil ++} ++ ++func (x *SessionState) GetInFixedNonce() []byte { ++ if x != nil { ++ return x.InFixedNonce ++ } ++ return nil ++} ++ ++func (x *SessionState) GetOutFixedNonce() []byte { ++ if x != nil { ++ return x.OutFixedNonce ++ } ++ return nil ++} ++ ++func (x *SessionState) GetConnectionId() uint64 { ++ if x != nil { ++ return x.ConnectionId ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetIsHandshakeResumed() bool { ++ if x != nil { ++ return x.IsHandshakeResumed ++ } ++ return false ++} ++ ++type SessionResult struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocol negotiated for this session. ++ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` ++ // The session state at the end. This state contains all cryptographic ++ // material required to initialize the record protocol object. ++ State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` ++ // The authenticated identity of the peer. ++ PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. 
++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the local certificate used in the handshake. ++ LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` ++ // The SHA256 hash of the peer certificate used in the handshake. ++ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` ++} ++ ++func (x *SessionResult) Reset() { ++ *x = SessionResult{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResult) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResult) ProtoMessage() {} ++ ++func (x *SessionResult) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. ++func (*SessionResult) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} ++} ++ ++func (x *SessionResult) GetApplicationProtocol() string { ++ if x != nil { ++ return x.ApplicationProtocol ++ } ++ return "" ++} ++ ++func (x *SessionResult) GetState() *SessionState { ++ if x != nil { ++ return x.State ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.PeerIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetLocalCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalCertFingerprint ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetPeerCertFingerprint() []byte { ++ if x != nil { ++ return x.PeerCertFingerprint ++ } ++ return nil ++} ++ ++type SessionStatus struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The status code that is specific to the application and the implementation ++ // of S2A, e.g., gRPC status code. ++ Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` ++ // The status details. 
++ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` ++} ++ ++func (x *SessionStatus) Reset() { ++ *x = SessionStatus{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionStatus) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionStatus) ProtoMessage() {} ++ ++func (x *SessionStatus) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. ++func (*SessionStatus) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} ++} ++ ++func (x *SessionStatus) GetCode() uint32 { ++ if x != nil { ++ return x.Code ++ } ++ return 0 ++} ++ ++func (x *SessionStatus) GetDetails() string { ++ if x != nil { ++ return x.Details ++ } ++ return "" ++} ++ ++type SessionResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. ++ // ++ // If the SessionResult is populated, then this must coincide with the local ++ // identity specified in the SessionResult; otherwise, the handshake must ++ // fail. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The byte representation of the frames that should be sent to the peer. May ++ // be empty if nothing needs to be sent to the peer or if in_bytes in the ++ // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent ++ // to the peer even if the session setup status is not OK as these frames may ++ // contain appropriate alerts. ++ OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` ++ // Number of bytes in the in_bytes field that are consumed by S2A. It is ++ // possible that part of in_bytes is unrelated to the session setup process. ++ BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` ++ // This is set if the session is successfully set up. out_frames may ++ // still be set to frames that needs to be forwarded to the peer. ++ Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` ++ // Status of session setup at the current stage. 
++ Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` ++} ++ ++func (x *SessionResp) Reset() { ++ *x = SessionResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResp) ProtoMessage() {} ++ ++func (x *SessionResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. ++func (*SessionResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} ++} ++ ++func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOutFrames() []byte { ++ if x != nil { ++ return x.OutFrames ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetBytesConsumed() uint32 { ++ if x != nil { ++ return x.BytesConsumed ++ } ++ return 0 ++} ++ ++func (x *SessionResp) GetResult() *SessionResult { ++ if x != nil { ++ return x.Result ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetStatus() *SessionStatus { ++ if x != nil { ++ return x.Status ++ } ++ return nil ++} ++ ++var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ ++ 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, ++ 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, ++ 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, ++ 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, ++ 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, ++ 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, ++ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, ++ 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, ++ 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, ++ 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, ++ 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, ++ 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, ++ 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, ++ 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, ++ 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, ++ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, ++ 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, ++ 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, ++ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, ++ 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, ++ 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, ++ 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, ++ 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, ++ 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, ++ 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, ++ 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, ++ 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, ++ 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, ++ 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, ++ 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, ++ 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, ++ 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, ++ 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, ++ 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, ++ 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, ++ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, ++ 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, ++ 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, ++ 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, ++ 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, ++ 0x69, 
0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, ++ 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, ++ 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, ++ 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, ++ 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, ++ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, ++ 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, ++ 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, ++ 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, ++ 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, ++ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, ++ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, ++ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, ++ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, ++ 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, ++ 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, ++ 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, ++ 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, ++ 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, ++ 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, ++ 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, ++ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, ++ 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, ++ 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, ++ 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, ++ 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
++ 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, ++ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, ++ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, ++ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, ++ 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, ++ 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, ++ 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, ++ 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, ++ 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, ++ 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, ++ 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, ++ 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, ++ 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, ++ 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, ++ 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, ++ 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, ++ 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, ++ 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, ++ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, ++ 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, ++ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, ++ 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, ++ 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, ++ 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, ++ 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, ++ 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, ++ 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x0b, ++ 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, ++ 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, ++ 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, ++ 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, ++ 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, ++ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, ++ 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, ++ 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once ++ file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc ++) ++ ++func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { ++ file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { ++ file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) ++ }) ++ return file_internal_proto_s2a_s2a_proto_rawDescData ++} ++ ++var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) ++var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ ++ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism ++ (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq ++ (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq ++ (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq ++ (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq ++ (*SessionReq)(nil), // 5: s2a.proto.SessionReq ++ (*SessionState)(nil), // 6: s2a.proto.SessionState ++ (*SessionResult)(nil), // 7: s2a.proto.SessionResult ++ (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus ++ (*SessionResp)(nil), // 9: s2a.proto.SessionResp ++ (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity ++ (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite ++} ++var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ ++ 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity ++ 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion ++ 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite ++ 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity ++ 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity ++ 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion ++ 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite ++ 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> 
s2a.proto.Identity ++ 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity ++ 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq ++ 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq ++ 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq ++ 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq ++ 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism ++ 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite ++ 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState ++ 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity ++ 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity ++ 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity ++ 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult ++ 8, // 23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus ++ 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq ++ 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp ++ 25, // [25:26] is the sub-list for method output_type ++ 24, // [24:25] is the sub-list for method input_type ++ 24, // [24:24] is the sub-list for extension type_name ++ 24, // [24:24] is the sub-list for extension extendee ++ 0, // [0:24] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_s2a_s2a_proto_init() } ++func file_internal_proto_s2a_s2a_proto_init() { ++ if File_internal_proto_s2a_s2a_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AuthenticationMechanism); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ClientSessionStartReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ServerSessionStartReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionNextReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ResumptionTicketReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ 
default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionState); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResult); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionStatus); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ ++ (*AuthenticationMechanism_Token)(nil), ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ ++ (*SessionReq_ClientStart)(nil), ++ (*SessionReq_ServerStart)(nil), ++ (*SessionReq_Next)(nil), ++ (*SessionReq_ResumptionTicket)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 10, ++ NumExtensions: 0, ++ NumServices: 1, ++ }, ++ GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, ++ DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, ++ MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, ++ }.Build() ++ File_internal_proto_s2a_s2a_proto = out.File ++ file_internal_proto_s2a_s2a_proto_rawDesc = nil ++ file_internal_proto_s2a_s2a_proto_goTypes = nil ++ file_internal_proto_s2a_s2a_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +new file mode 100644 +index 00000000000..0fa582fc874 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +@@ -0,0 +1,173 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
++// versions: ++// - protoc-gen-go-grpc v1.3.0 ++// - protoc v3.21.12 ++// source: internal/proto/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ context "context" ++ grpc "google.golang.org/grpc" ++ codes "google.golang.org/grpc/codes" ++ status "google.golang.org/grpc/status" ++) ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++// Requires gRPC-Go v1.32.0 or later. ++const _ = grpc.SupportPackageIsVersion7 ++ ++const ( ++ S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" ++) ++ ++// S2AServiceClient is the client API for S2AService service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. ++type S2AServiceClient interface { ++ // S2A service accepts a stream of session setup requests and returns a stream ++ // of session setup responses. The client of this service is expected to send ++ // exactly one client_start or server_start message followed by at least one ++ // next message. Applications running TLS clients can send requests with ++ // resumption_ticket messages only after the session is successfully set up. ++ // ++ // Every time S2A client sends a request, this service sends a response. ++ // However, clients do not have to wait for service response before sending ++ // the next request. ++ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) ++} ++ ++type s2AServiceClient struct { ++ cc grpc.ClientConnInterface ++} ++ ++func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { ++ return &s2AServiceClient{cc} ++} ++ ++func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { ++ stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &s2AServiceSetUpSessionClient{stream} ++ return x, nil ++} ++ ++type S2AService_SetUpSessionClient interface { ++ Send(*SessionReq) error ++ Recv() (*SessionResp, error) ++ grpc.ClientStream ++} ++ ++type s2AServiceSetUpSessionClient struct { ++ grpc.ClientStream ++} ++ ++func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { ++ return x.ClientStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { ++ m := new(SessionResp) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AServiceServer is the server API for S2AService service. ++// All implementations must embed UnimplementedS2AServiceServer ++// for forward compatibility ++type S2AServiceServer interface { ++ // S2A service accepts a stream of session setup requests and returns a stream ++ // of session setup responses. The client of this service is expected to send ++ // exactly one client_start or server_start message followed by at least one ++ // next message. Applications running TLS clients can send requests with ++ // resumption_ticket messages only after the session is successfully set up. ++ // ++ // Every time S2A client sends a request, this service sends a response. ++ // However, clients do not have to wait for service response before sending ++ // the next request. 
++ SetUpSession(S2AService_SetUpSessionServer) error ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. ++type UnimplementedS2AServiceServer struct { ++} ++ ++func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { ++ return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") ++} ++func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} ++ ++// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. ++// Use of this interface is not recommended, as added methods to S2AServiceServer will ++// result in compilation errors. ++type UnsafeS2AServiceServer interface { ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { ++ s.RegisterService(&S2AService_ServiceDesc, srv) ++} ++ ++func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { ++ return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) ++} ++ ++type S2AService_SetUpSessionServer interface { ++ Send(*SessionResp) error ++ Recv() (*SessionReq, error) ++ grpc.ServerStream ++} ++ ++type s2AServiceSetUpSessionServer struct { ++ grpc.ServerStream ++} ++ ++func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { ++ m := new(SessionReq) ++ if err := x.ServerStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. ++// It's only intended for direct use with grpc.RegisterService, ++// and not to be introspected or modified (even as a copy) ++var S2AService_ServiceDesc = grpc.ServiceDesc{ ++ ServiceName: "s2a.proto.S2AService", ++ HandlerType: (*S2AServiceServer)(nil), ++ Methods: []grpc.MethodDesc{}, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "SetUpSession", ++ Handler: _S2AService_SetUpSession_Handler, ++ ServerStreams: true, ++ ClientStreams: true, ++ }, ++ }, ++ Metadata: "internal/proto/s2a/s2a.proto", ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +new file mode 100644 +index 00000000000..c84bed97748 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +@@ -0,0 +1,367 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. 
++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/common/common.proto ++ ++package common_go_proto ++ ++import ( ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using ++// S2A. ++type Ciphersuite int32 ++ ++const ( ++ Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 ++) ++ ++// Enum value maps for Ciphersuite. ++var ( ++ Ciphersuite_name = map[int32]string{ ++ 0: "CIPHERSUITE_UNSPECIFIED", ++ 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", ++ 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", ++ 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", ++ 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", ++ 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", ++ 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", ++ } ++ Ciphersuite_value = map[string]int32{ ++ "CIPHERSUITE_UNSPECIFIED": 0, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, ++ "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, ++ "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, ++ "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, ++ } ++) ++ ++func (x Ciphersuite) Enum() *Ciphersuite { ++ p := new(Ciphersuite) ++ *p = x ++ return p ++} ++ ++func (x Ciphersuite) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() ++} ++ ++func (Ciphersuite) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[0] ++} ++ ++func (x Ciphersuite) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use Ciphersuite.Descriptor instead. ++func (Ciphersuite) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++// The TLS versions supported by S2A's handshaker module. ++type TLSVersion int32 ++ ++const ( ++ TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 ++ TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 ++ TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 ++ TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 ++ TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 ++) ++ ++// Enum value maps for TLSVersion. 
++var ( ++ TLSVersion_name = map[int32]string{ ++ 0: "TLS_VERSION_UNSPECIFIED", ++ 1: "TLS_VERSION_1_0", ++ 2: "TLS_VERSION_1_1", ++ 3: "TLS_VERSION_1_2", ++ 4: "TLS_VERSION_1_3", ++ } ++ TLSVersion_value = map[string]int32{ ++ "TLS_VERSION_UNSPECIFIED": 0, ++ "TLS_VERSION_1_0": 1, ++ "TLS_VERSION_1_1": 2, ++ "TLS_VERSION_1_2": 3, ++ "TLS_VERSION_1_3": 4, ++ } ++) ++ ++func (x TLSVersion) Enum() *TLSVersion { ++ p := new(TLSVersion) ++ *p = x ++ return p ++} ++ ++func (x TLSVersion) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() ++} ++ ++func (TLSVersion) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[1] ++} ++ ++func (x TLSVersion) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use TLSVersion.Descriptor instead. ++func (TLSVersion) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} ++} ++ ++// The side in the TLS connection. ++type ConnectionSide int32 ++ ++const ( ++ ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 ++ ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 ++ ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 ++) ++ ++// Enum value maps for ConnectionSide. ++var ( ++ ConnectionSide_name = map[int32]string{ ++ 0: "CONNECTION_SIDE_UNSPECIFIED", ++ 1: "CONNECTION_SIDE_CLIENT", ++ 2: "CONNECTION_SIDE_SERVER", ++ } ++ ConnectionSide_value = map[string]int32{ ++ "CONNECTION_SIDE_UNSPECIFIED": 0, ++ "CONNECTION_SIDE_CLIENT": 1, ++ "CONNECTION_SIDE_SERVER": 2, ++ } ++) ++ ++func (x ConnectionSide) Enum() *ConnectionSide { ++ p := new(ConnectionSide) ++ *p = x ++ return p ++} ++ ++func (x ConnectionSide) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() ++} ++ ++func (ConnectionSide) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[2] ++} ++ ++func (x ConnectionSide) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ConnectionSide.Descriptor instead. ++func (ConnectionSide) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} ++} ++ ++// The ALPN protocols that the application can negotiate during a TLS handshake. ++type AlpnProtocol int32 ++ ++const ( ++ AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 ++ AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 ++ AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 ++ AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 ++) ++ ++// Enum value maps for AlpnProtocol. 
++var ( ++ AlpnProtocol_name = map[int32]string{ ++ 0: "ALPN_PROTOCOL_UNSPECIFIED", ++ 1: "ALPN_PROTOCOL_GRPC", ++ 2: "ALPN_PROTOCOL_HTTP2", ++ 3: "ALPN_PROTOCOL_HTTP1_1", ++ } ++ AlpnProtocol_value = map[string]int32{ ++ "ALPN_PROTOCOL_UNSPECIFIED": 0, ++ "ALPN_PROTOCOL_GRPC": 1, ++ "ALPN_PROTOCOL_HTTP2": 2, ++ "ALPN_PROTOCOL_HTTP1_1": 3, ++ } ++) ++ ++func (x AlpnProtocol) Enum() *AlpnProtocol { ++ p := new(AlpnProtocol) ++ *p = x ++ return p ++} ++ ++func (x AlpnProtocol) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() ++} ++ ++func (AlpnProtocol) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[3] ++} ++ ++func (x AlpnProtocol) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use AlpnProtocol.Descriptor instead. ++func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} ++} ++ ++var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ ++ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, ++ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, ++ 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, ++ 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, ++ 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, ++ 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, ++ 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, ++ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, ++ 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, ++ 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, ++ 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, ++ 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, ++ 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, ++ 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, ++ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, ++ 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, ++ 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, ++ 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, ++ 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, ++ 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 
0x10, 0x05, 0x12, 0x37, 0x0a, ++ 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, ++ 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, ++ 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, ++ 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, ++ 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, ++ 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, ++ 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, ++ 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, ++ 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, ++ 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, ++ 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, ++ 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, ++ 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, ++ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, ++ 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, ++ 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, ++ 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, ++ 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, ++ 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, ++ 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, ++ 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, ++ 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, ++ 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, ++ 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, ++ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, ++ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, ++ 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc ++) ++ ++func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_common_common_proto_rawDescData ++} ++ 
++var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) ++var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ ++ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite ++ (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion ++ (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide ++ (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol ++} ++var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ ++ 0, // [0:0] is the sub-list for method output_type ++ 0, // [0:0] is the sub-list for method input_type ++ 0, // [0:0] is the sub-list for extension type_name ++ 0, // [0:0] is the sub-list for extension extendee ++ 0, // [0:0] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_common_common_proto_init() } ++func file_internal_proto_v2_common_common_proto_init() { ++ if File_internal_proto_v2_common_common_proto != nil { ++ return ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, ++ NumEnums: 4, ++ NumMessages: 0, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_v2_common_common_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, ++ EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, ++ }.Build() ++ File_internal_proto_v2_common_common_proto = out.File ++ file_internal_proto_v2_common_common_proto_rawDesc = nil ++ file_internal_proto_v2_common_common_proto_goTypes = nil ++ file_internal_proto_v2_common_common_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +new file mode 100644 +index 00000000000..b7fd871c7a7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +@@ -0,0 +1,248 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/s2a_context/s2a_context.proto ++ ++package s2a_context_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. 
++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type S2AContext struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The SPIFFE ID from the peer leaf certificate, if present. ++ // ++ // This field is only populated if the leaf certificate is a valid SPIFFE ++ // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid ++ // SPIFFE ID. ++ LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` ++ // The URIs that are present in the SubjectAltName extension of the peer leaf ++ // certificate. ++ // ++ // Note that the extracted URIs are not validated and may not be properly ++ // formatted. ++ LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` ++ // The DNSNames that are present in the SubjectAltName extension of the peer ++ // leaf certificate. ++ LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` ++ // The (ordered) list of fingerprints in the certificate chain used to verify ++ // the given leaf certificate. The order MUST be from leaf certificate ++ // fingerprint to root certificate fingerprint. ++ // ++ // A fingerprint is the base-64 encoding of the SHA256 hash of the ++ // DER-encoding of a certificate. The list MAY be populated even if the peer ++ // certificate chain was NOT validated successfully. ++ PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` ++ // The local identity used during session setup. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the DER-encoding of the local leaf certificate used in ++ // the handshake. ++ LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` ++} ++ ++func (x *S2AContext) Reset() { ++ *x = S2AContext{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *S2AContext) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*S2AContext) ProtoMessage() {} ++ ++func (x *S2AContext) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
++func (*S2AContext) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *S2AContext) GetLeafCertSpiffeId() string { ++ if x != nil { ++ return x.LeafCertSpiffeId ++ } ++ return "" ++} ++ ++func (x *S2AContext) GetLeafCertUris() []string { ++ if x != nil { ++ return x.LeafCertUris ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLeafCertDnsnames() []string { ++ if x != nil { ++ return x.LeafCertDnsnames ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { ++ if x != nil { ++ return x.PeerCertificateChainFingerprints ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalLeafCertFingerprint ++ } ++ return nil ++} ++ ++var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ ++ 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, ++ 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, ++ 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, ++ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, ++ 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, ++ 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, ++ 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, ++ 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, ++ 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, ++ 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, ++ 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, ++ 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, ++ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, ++ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, ++ 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, ++ 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, ++ 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, ++ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, ++ 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 
0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, ++ 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, ++ 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, ++ 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, ++ 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, ++ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, ++ 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, ++ 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, ++ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc ++) ++ ++func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData ++} ++ ++var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) ++var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ ++ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext ++ (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity ++} ++var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ ++ 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity ++ 1, // [1:1] is the sub-list for method output_type ++ 1, // [1:1] is the sub-list for method input_type ++ 1, // [1:1] is the sub-list for extension type_name ++ 1, // [1:1] is the sub-list for extension extendee ++ 0, // [0:1] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } ++func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { ++ if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*S2AContext); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 1, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, ++ MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, ++ }.Build() ++ File_internal_proto_v2_s2a_context_s2a_context_proto = out.File ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil ++ file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil ++ file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil 
++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +new file mode 100644 +index 00000000000..e843450c7ed +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +@@ -0,0 +1,2494 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" ++ common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ++ s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type SignatureAlgorithm int32 ++ ++const ( ++ SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 ++ // RSA Public-Key Cryptography Standards #1. ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 ++ // ECDSA. ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 ++ // RSA Probabilistic Signature Scheme. ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 ++ // ED25519. ++ SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 ++) ++ ++// Enum value maps for SignatureAlgorithm. 
++var ( ++ SignatureAlgorithm_name = map[int32]string{ ++ 0: "S2A_SSL_SIGN_UNSPECIFIED", ++ 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", ++ 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", ++ 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", ++ 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", ++ 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", ++ 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", ++ 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", ++ 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", ++ 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", ++ 10: "S2A_SSL_SIGN_ED25519", ++ } ++ SignatureAlgorithm_value = map[string]int32{ ++ "S2A_SSL_SIGN_UNSPECIFIED": 0, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, ++ "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, ++ "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, ++ "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, ++ "S2A_SSL_SIGN_ED25519": 10, ++ } ++) ++ ++func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { ++ p := new(SignatureAlgorithm) ++ *p = x ++ return p ++} ++ ++func (x SignatureAlgorithm) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() ++} ++ ++func (SignatureAlgorithm) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] ++} ++ ++func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use SignatureAlgorithm.Descriptor instead. ++func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 ++ ++const ( ++ GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 ++ GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 ++) ++ ++// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
++var ( ++ GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "DONT_REQUEST_CLIENT_CERTIFICATE", ++ 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", ++ 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", ++ 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", ++ 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", ++ } ++ GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "DONT_REQUEST_CLIENT_CERTIFICATE": 1, ++ "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, ++ "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, ++ "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, ++ "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, ++ } ++) ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { ++ p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) ++ *p = x ++ return p ++} ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() ++} ++ ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] ++} ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} ++} ++ ++type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 ++ ++const ( ++ OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 ++ // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of ++ // the TLS handshake must be signed to prove possession of the private key. ++ // ++ // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. ++ OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 ++ // When performing a TLS 1.2 handshake using an RSA algorithm, the key ++ // exchange algorithm involves the client generating a premaster secret, ++ // encrypting it using the server's public key, and sending this encrypted ++ // blob to the server in a ClientKeyExchange message. ++ // ++ // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. ++ OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 ++) ++ ++// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
++var ( ++ OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SIGN", ++ 2: "DECRYPT", ++ } ++ OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SIGN": 1, ++ "DECRYPT": 2, ++ } ++) ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { ++ p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) ++ *p = x ++ return p ++} ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() ++} ++ ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] ++} ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} ++} ++ ++type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 ++ ++const ( ++ OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 ++ OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 ++ OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 ++) ++ ++// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. ++var ( ++ OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "ENCRYPT", ++ 2: "DECRYPT", ++ } ++ OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "ENCRYPT": 1, ++ "DECRYPT": 2, ++ } ++) ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { ++ p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) ++ *p = x ++ return p ++} ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() ++} ++ ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] ++} ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} ++} ++ ++type ValidatePeerCertificateChainReq_VerificationMode int32 ++ ++const ( ++ // The default verification mode supported by S2A. 
++ ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 ++ // The SPIFFE verification mode selects the set of trusted certificates to ++ // use for path building based on the SPIFFE trust domain in the peer's leaf ++ // certificate. ++ ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 ++ // The connect-to-Google verification mode uses the trust bundle for ++ // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ++ ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 ++) ++ ++// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. ++var ( ++ ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SPIFFE", ++ 2: "CONNECT_TO_GOOGLE", ++ } ++ ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SPIFFE": 1, ++ "CONNECT_TO_GOOGLE": 2, ++ } ++) ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { ++ p := new(ValidatePeerCertificateChainReq_VerificationMode) ++ *p = x ++ return p ++} ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() ++} ++ ++func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] ++} ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. ++func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} ++} ++ ++type ValidatePeerCertificateChainResp_ValidationResult int32 ++ ++const ( ++ ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 ++ ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 ++ ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 ++) ++ ++// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
++var ( ++ ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SUCCESS", ++ 2: "FAILURE", ++ } ++ ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SUCCESS": 1, ++ "FAILURE": 2, ++ } ++) ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { ++ p := new(ValidatePeerCertificateChainResp_ValidationResult) ++ *p = x ++ return p ++} ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() ++} ++ ++func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] ++} ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. ++func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} ++} ++ ++type AlpnPolicy struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // If true, the application MUST perform ALPN negotiation. ++ EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` ++ // The ordered list of ALPN protocols that specify how the application SHOULD ++ // negotiate ALPN during the TLS handshake. ++ // ++ // The application MAY ignore any ALPN protocols in this list that are not ++ // supported by the application. ++ AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` ++} ++ ++func (x *AlpnPolicy) Reset() { ++ *x = AlpnPolicy{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AlpnPolicy) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AlpnPolicy) ProtoMessage() {} ++ ++func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. 
++func (*AlpnPolicy) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { ++ if x != nil { ++ return x.EnableAlpnNegotiation ++ } ++ return false ++} ++ ++func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { ++ if x != nil { ++ return x.AlpnProtocols ++ } ++ return nil ++} ++ ++type AuthenticationMechanism struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Applications may specify an identity associated to an authentication ++ // mechanism. Otherwise, S2A assumes that the authentication mechanism is ++ // associated with the default identity. If the default identity cannot be ++ // determined, the request is rejected. ++ Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` ++ // Types that are assignable to MechanismOneof: ++ // ++ // *AuthenticationMechanism_Token ++ MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` ++} ++ ++func (x *AuthenticationMechanism) Reset() { ++ *x = AuthenticationMechanism{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AuthenticationMechanism) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AuthenticationMechanism) ProtoMessage() {} ++ ++func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. ++func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} ++} ++ ++func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { ++ if x != nil { ++ return x.Identity ++ } ++ return nil ++} ++ ++func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { ++ if m != nil { ++ return m.MechanismOneof ++ } ++ return nil ++} ++ ++func (x *AuthenticationMechanism) GetToken() string { ++ if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { ++ return x.Token ++ } ++ return "" ++} ++ ++type isAuthenticationMechanism_MechanismOneof interface { ++ isAuthenticationMechanism_MechanismOneof() ++} ++ ++type AuthenticationMechanism_Token struct { ++ // A token that the application uses to authenticate itself to S2A. ++ Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` ++} ++ ++func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} ++ ++type Status struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The status code that is specific to the application and the implementation ++ // of S2A, e.g., gRPC status code. ++ Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` ++ // The status details. 
++ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` ++} ++ ++func (x *Status) Reset() { ++ *x = Status{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *Status) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*Status) ProtoMessage() {} ++ ++func (x *Status) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use Status.ProtoReflect.Descriptor instead. ++func (*Status) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} ++} ++ ++func (x *Status) GetCode() uint32 { ++ if x != nil { ++ return x.Code ++ } ++ return 0 ++} ++ ++func (x *Status) GetDetails() string { ++ if x != nil { ++ return x.Details ++ } ++ return "" ++} ++ ++type GetTlsConfigurationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The role of the application in the TLS connection. ++ ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` ++ // The server name indication (SNI) extension, which MAY be populated when a ++ // server is offloading to S2A. The SNI is used to determine the server ++ // identity if the local identity in the request is empty. ++ Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` ++} ++ ++func (x *GetTlsConfigurationReq) Reset() { ++ *x = GetTlsConfigurationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationReq) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. 
++func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} ++} ++ ++func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { ++ if x != nil { ++ return x.ConnectionSide ++ } ++ return common_go_proto.ConnectionSide(0) ++} ++ ++func (x *GetTlsConfigurationReq) GetSni() string { ++ if x != nil { ++ return x.Sni ++ } ++ return "" ++} ++ ++type GetTlsConfigurationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to TlsConfiguration: ++ // ++ // *GetTlsConfigurationResp_ClientTlsConfiguration_ ++ // *GetTlsConfigurationResp_ServerTlsConfiguration_ ++ TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` ++} ++ ++func (x *GetTlsConfigurationResp) Reset() { ++ *x = GetTlsConfigurationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. ++func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} ++} ++ ++func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { ++ if m != nil { ++ return m.TlsConfiguration ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { ++ if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { ++ return x.ClientTlsConfiguration ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { ++ if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { ++ return x.ServerTlsConfiguration ++ } ++ return nil ++} ++ ++type isGetTlsConfigurationResp_TlsConfiguration interface { ++ isGetTlsConfigurationResp_TlsConfiguration() ++} ++ ++type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { ++ ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` ++} ++ ++type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { ++ ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` ++} ++ ++func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { ++} ++ ++func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { ++} ++ ++type OffloadPrivateKeyOperationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields 
protoimpl.UnknownFields ++ ++ // The operation the private key is used for. ++ Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` ++ // The signature algorithm to be used for signing operations. ++ SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` ++ // The input bytes to be signed or decrypted. ++ // ++ // Types that are assignable to InBytes: ++ // ++ // *OffloadPrivateKeyOperationReq_RawBytes ++ // *OffloadPrivateKeyOperationReq_Sha256Digest ++ // *OffloadPrivateKeyOperationReq_Sha384Digest ++ // *OffloadPrivateKeyOperationReq_Sha512Digest ++ InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` ++} ++ ++func (x *OffloadPrivateKeyOperationReq) Reset() { ++ *x = OffloadPrivateKeyOperationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadPrivateKeyOperationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} ++ ++func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
++func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { ++ if x != nil { ++ return x.Operation ++ } ++ return OffloadPrivateKeyOperationReq_UNSPECIFIED ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { ++ if x != nil { ++ return x.SignatureAlgorithm ++ } ++ return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED ++} ++ ++func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { ++ if m != nil { ++ return m.InBytes ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { ++ return x.RawBytes ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { ++ return x.Sha256Digest ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { ++ return x.Sha384Digest ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { ++ return x.Sha512Digest ++ } ++ return nil ++} ++ ++type isOffloadPrivateKeyOperationReq_InBytes interface { ++ isOffloadPrivateKeyOperationReq_InBytes() ++} ++ ++type OffloadPrivateKeyOperationReq_RawBytes struct { ++ // Raw bytes to be hashed and signed, or decrypted. ++ RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha256Digest struct { ++ // A SHA256 hash to be signed. Must be 32 bytes. ++ Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha384Digest struct { ++ // A SHA384 hash to be signed. Must be 48 bytes. ++ Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha512Digest struct { ++ // A SHA512 hash to be signed. Must be 64 bytes. ++ Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` ++} ++ ++func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++type OffloadPrivateKeyOperationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The signed or decrypted output bytes. 
++ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` ++} ++ ++func (x *OffloadPrivateKeyOperationResp) Reset() { ++ *x = OffloadPrivateKeyOperationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadPrivateKeyOperationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} ++ ++func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. ++func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} ++} ++ ++func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { ++ if x != nil { ++ return x.OutBytes ++ } ++ return nil ++} ++ ++type OffloadResumptionKeyOperationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The operation the resumption key is used for. ++ Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` ++ // The bytes to be encrypted or decrypted. ++ InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *OffloadResumptionKeyOperationReq) Reset() { ++ *x = OffloadResumptionKeyOperationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadResumptionKeyOperationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} ++ ++func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. ++func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} ++} ++ ++func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { ++ if x != nil { ++ return x.Operation ++ } ++ return OffloadResumptionKeyOperationReq_UNSPECIFIED ++} ++ ++func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type OffloadResumptionKeyOperationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The encrypted or decrypted bytes. 
++ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` ++} ++ ++func (x *OffloadResumptionKeyOperationResp) Reset() { ++ *x = OffloadResumptionKeyOperationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadResumptionKeyOperationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} ++ ++func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. ++func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} ++} ++ ++func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { ++ if x != nil { ++ return x.OutBytes ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The verification mode that S2A MUST use to validate the peer certificate ++ // chain. ++ Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` ++ // Types that are assignable to PeerOneof: ++ // ++ // *ValidatePeerCertificateChainReq_ClientPeer_ ++ // *ValidatePeerCertificateChainReq_ServerPeer_ ++ PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` ++} ++ ++func (x *ValidatePeerCertificateChainReq) Reset() { ++ *x = ValidatePeerCertificateChainReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
++func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { ++ if x != nil { ++ return x.Mode ++ } ++ return ValidatePeerCertificateChainReq_UNSPECIFIED ++} ++ ++func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { ++ if m != nil { ++ return m.PeerOneof ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { ++ if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { ++ return x.ClientPeer ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { ++ if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { ++ return x.ServerPeer ++ } ++ return nil ++} ++ ++type isValidatePeerCertificateChainReq_PeerOneof interface { ++ isValidatePeerCertificateChainReq_PeerOneof() ++} ++ ++type ValidatePeerCertificateChainReq_ClientPeer_ struct { ++ ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` ++} ++ ++type ValidatePeerCertificateChainReq_ServerPeer_ struct { ++ ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` ++} ++ ++func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} ++ ++func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} ++ ++type ValidatePeerCertificateChainResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The result of validating the peer certificate chain. ++ ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` ++ // The validation details. This field is only populated when the validation ++ // result is NOT SUCCESS. ++ ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` ++ // The S2A context contains information from the peer certificate chain. ++ // ++ // The S2A context MAY be populated even if validation of the peer certificate ++ // chain fails. 
++ Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainResp) Reset() { ++ *x = ValidatePeerCertificateChainResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainResp) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. ++func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { ++ if x != nil { ++ return x.ValidationResult ++ } ++ return ValidatePeerCertificateChainResp_UNSPECIFIED ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { ++ if x != nil { ++ return x.ValidationDetails ++ } ++ return "" ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { ++ if x != nil { ++ return x.Context ++ } ++ return nil ++} ++ ++type SessionReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The identity corresponding to the TLS configurations that MUST be used for ++ // the TLS handshake. ++ // ++ // If a managed identity already exists, the local identity and authentication ++ // mechanisms are ignored. If a managed identity doesn't exist and the local ++ // identity is not populated, S2A will try to deduce the managed identity to ++ // use from the SNI extension. If that also fails, S2A uses the default ++ // identity (if one exists). ++ LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The authentication mechanisms that the application wishes to use to ++ // authenticate to S2A, ordered by preference. S2A will always use the first ++ // authentication mechanism that matches the managed identity. 
++ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` ++ // Types that are assignable to ReqOneof: ++ // ++ // *SessionReq_GetTlsConfigurationReq ++ // *SessionReq_OffloadPrivateKeyOperationReq ++ // *SessionReq_OffloadResumptionKeyOperationReq ++ // *SessionReq_ValidatePeerCertificateChainReq ++ ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` ++} ++ ++func (x *SessionReq) Reset() { ++ *x = SessionReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionReq) ProtoMessage() {} ++ ++func (x *SessionReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. ++func (*SessionReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} ++} ++ ++func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { ++ if x != nil { ++ return x.AuthenticationMechanisms ++ } ++ return nil ++} ++ ++func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { ++ if m != nil { ++ return m.ReqOneof ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { ++ return x.GetTlsConfigurationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { ++ return x.OffloadPrivateKeyOperationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { ++ return x.OffloadResumptionKeyOperationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { ++ return x.ValidatePeerCertificateChainReq ++ } ++ return nil ++} ++ ++type isSessionReq_ReqOneof interface { ++ isSessionReq_ReqOneof() ++} ++ ++type SessionReq_GetTlsConfigurationReq struct { ++ // Requests the certificate chain and TLS configuration corresponding to the ++ // local identity, which the application MUST use to negotiate the TLS ++ // handshake. ++ GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` ++} ++ ++type SessionReq_OffloadPrivateKeyOperationReq struct { ++ // Signs or decrypts the input bytes using a private key corresponding to ++ // the local identity in the request. ++ // ++ // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the ++ // S2Av2 by a server during a TLS 1.2 handshake. 
++ OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` ++} ++ ++type SessionReq_OffloadResumptionKeyOperationReq struct { ++ // Encrypts or decrypts the input bytes using a resumption key corresponding ++ // to the local identity in the request. ++ OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` ++} ++ ++type SessionReq_ValidatePeerCertificateChainReq struct { ++ // Verifies the peer's certificate chain using ++ // (a) trust bundles corresponding to the local identity in the request, and ++ // (b) the verification mode in the request. ++ ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` ++} ++ ++func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} ++ ++type SessionResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Status of the session response. ++ // ++ // The status field is populated so that if an error occurs when making an ++ // individual request, then communication with the S2A may continue. If an ++ // error is returned directly (e.g. at the gRPC layer), then it may result ++ // that the bidirectional stream being closed. ++ Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` ++ // Types that are assignable to RespOneof: ++ // ++ // *SessionResp_GetTlsConfigurationResp ++ // *SessionResp_OffloadPrivateKeyOperationResp ++ // *SessionResp_OffloadResumptionKeyOperationResp ++ // *SessionResp_ValidatePeerCertificateChainResp ++ RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` ++} ++ ++func (x *SessionResp) Reset() { ++ *x = SessionResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResp) ProtoMessage() {} ++ ++func (x *SessionResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
++func (*SessionResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} ++} ++ ++func (x *SessionResp) GetStatus() *Status { ++ if x != nil { ++ return x.Status ++ } ++ return nil ++} ++ ++func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { ++ if m != nil { ++ return m.RespOneof ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { ++ return x.GetTlsConfigurationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { ++ return x.OffloadPrivateKeyOperationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { ++ return x.OffloadResumptionKeyOperationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { ++ return x.ValidatePeerCertificateChainResp ++ } ++ return nil ++} ++ ++type isSessionResp_RespOneof interface { ++ isSessionResp_RespOneof() ++} ++ ++type SessionResp_GetTlsConfigurationResp struct { ++ // Contains the certificate chain and TLS configurations corresponding to ++ // the local identity. ++ GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` ++} ++ ++type SessionResp_OffloadPrivateKeyOperationResp struct { ++ // Contains the signed or encrypted output bytes using the private key ++ // corresponding to the local identity. ++ OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` ++} ++ ++type SessionResp_OffloadResumptionKeyOperationResp struct { ++ // Contains the encrypted or decrypted output bytes using the resumption key ++ // corresponding to the local identity. ++ OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` ++} ++ ++type SessionResp_ValidatePeerCertificateChainResp struct { ++ // Contains the validation result, peer identity and fingerprints of peer ++ // certificates. ++ ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` ++} ++ ++func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} ++ ++// Next ID: 8 ++type GetTlsConfigurationResp_ClientTlsConfiguration struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain that the client MUST use for the TLS handshake. ++ // It's a list of PEM-encoded certificates, ordered from leaf to root, ++ // excluding the root. 
++ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The minimum TLS version number that the client MUST use for the TLS ++ // handshake. If this field is not provided, the client MUST use the default ++ // minimum version of the client's TLS library. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` ++ // The maximum TLS version number that the client MUST use for the TLS ++ // handshake. If this field is not provided, the client MUST use the default ++ // maximum version of the client's TLS library. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` ++ // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to ++ // negotiate in the TLS handshake. ++ Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` ++ // The policy that dictates how the client negotiates ALPN during the TLS ++ // handshake. ++ AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { ++ *x = GetTlsConfigurationResp_ClientTlsConfiguration{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
++func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuites ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { ++ if x != nil { ++ return x.AlpnPolicy ++ } ++ return nil ++} ++ ++// Next ID: 12 ++type GetTlsConfigurationResp_ServerTlsConfiguration struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain that the server MUST use for the TLS handshake. ++ // It's a list of PEM-encoded certificates, ordered from leaf to root, ++ // excluding the root. ++ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The minimum TLS version number that the server MUST use for the TLS ++ // handshake. If this field is not provided, the server MUST use the default ++ // minimum version of the server's TLS library. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` ++ // The maximum TLS version number that the server MUST use for the TLS ++ // handshake. If this field is not provided, the server MUST use the default ++ // maximum version of the server's TLS library. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` ++ // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to ++ // negotiate in the TLS handshake. ++ Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` ++ // Whether to enable TLS resumption. ++ TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` ++ // Whether the server MUST request a client certificate (i.e. to negotiate ++ // TLS vs. mTLS). ++ RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` ++ // Returns the maximum number of extra bytes that ++ // |OffloadResumptionKeyOperation| can add to the number of unencrypted ++ // bytes to form the encrypted bytes. 
++ MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` ++ // The policy that dictates how the server negotiates ALPN during the TLS ++ // handshake. ++ AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { ++ *x = GetTlsConfigurationResp_ServerTlsConfiguration{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. ++func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuites ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { ++ if x != nil { ++ return x.TlsResumptionEnabled ++ } ++ return false ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { ++ if x != nil { ++ return x.RequestClientCertificate ++ } ++ return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { ++ if x != nil { ++ return x.MaxOverheadOfTicketAead ++ } ++ return 0 ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { ++ if x != nil { ++ return x.AlpnPolicy ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq_ClientPeer struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain to be verified. The chain MUST be a list of ++ // DER-encoded certificates, ordered from leaf to root, excluding the root. 
++ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { ++ *x = ValidatePeerCertificateChainReq_ClientPeer{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. ++func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq_ServerPeer struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain to be verified. The chain MUST be a list of ++ // DER-encoded certificates, ordered from leaf to root, excluding the root. ++ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The expected hostname of the server. ++ ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` ++ // The UnrestrictedClientPolicy specified by the user. ++ SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { ++ *x = ValidatePeerCertificateChainReq_ServerPeer{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
++func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { ++ if x != nil { ++ return x.ServerHostname ++ } ++ return "" ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { ++ if x != nil { ++ return x.SerializedUnrestrictedClientPolicy ++ } ++ return nil ++} ++ ++var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ ++ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, ++ 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, ++ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, ++ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, ++ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, ++ 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, ++ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, ++ 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, ++ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, ++ 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, ++ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, ++ 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, ++ 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, ++ 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, ++ 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, ++ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 
0x64, 0x65, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, ++ 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, ++ 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, ++ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, ++ 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, ++ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, ++ 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, ++ 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, ++ 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, ++ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, ++ 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, ++ 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, ++ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, ++ 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, ++ 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, ++ 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, ++ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, ++ 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, ++ 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, ++ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, ++ 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 
0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, ++ 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, ++ 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, ++ 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, ++ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, ++ 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, ++ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, ++ 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, ++ 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, ++ 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, ++ 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, ++ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, ++ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, ++ 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, ++ 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, ++ 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, ++ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, ++ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, ++ 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, ++ 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, ++ 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, ++ 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, ++ 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, ++ 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, ++ 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, ++ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, ++ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, ++ 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, ++ 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, ++ 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, ++ 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, ++ 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, ++ 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, ++ 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, ++ 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, ++ 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, ++ 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, ++ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, ++ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, ++ 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, ++ 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, ++ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, ++ 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, ++ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, ++ 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, ++ 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, ++ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, ++ 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, ++ 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, ++ 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, ++ 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, ++ 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, ++ 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, ++ 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, ++ 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, ++ 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, ++ 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, ++ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, ++ 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, ++ 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, ++ 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, ++ 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, ++ 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, ++ 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, ++ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4b, 0x65, 0x79, 0x4f, 
0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, ++ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, ++ 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, ++ 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, ++ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, ++ 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, ++ 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, ++ 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, ++ 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, ++ 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, ++ 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, ++ 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, ++ 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, ++ 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, ++ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, ++ 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, ++ 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, ++ 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, ++ 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, ++ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, ++ 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, ++ 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, ++ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, ++ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, ++ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, ++ 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, ++ 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, ++ 0x63, 0x61, 
0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, ++ 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, ++ 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, ++ 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, ++ 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, ++ 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, ++ 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, ++ 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, ++ 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, ++ 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, ++ 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, ++ 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, ++ 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, ++ 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, ++ 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, ++ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, ++ 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, ++ 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, ++ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, ++ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, ++ 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, ++ 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, ++ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, ++ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, ++ 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, ++ 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, ++ 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, ++ 
0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, ++ 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, ++ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, ++ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, ++ 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, ++ 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, ++ 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, ++ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, ++ 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, ++ 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, ++ 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, ++ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, ++ 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, ++ 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, ++ 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, ++ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, ++ 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, ++ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, ++ 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 
0x72, ++ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, ++ 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, ++ 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, ++ 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, ++ 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, ++ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, ++ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, ++ 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, ++ 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, ++ 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, ++ 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, ++ 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, ++ 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, ++ 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, ++ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, ++ 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, ++ 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x65, 0x43, ++ 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, ++ 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, ++ 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, ++ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, ++ 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, ++ 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, ++ 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, ++ 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, ++ 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, ++ 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, ++ 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, ++ 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, ++ 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, ++ 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, ++ 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, ++ 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, ++ 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, ++ 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, ++ 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, ++ 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, ++ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, ++ 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, ++ 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, ++ 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, ++ 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, ++ 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, ++ 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, ++ 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, ++ 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 
0x34, 0x67, 0x69, 0x74, 0x68, ++ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, ++ 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc ++) ++ ++func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescData ++} ++ ++var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) ++var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) ++var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ ++ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm ++ (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate ++ (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation ++ (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation ++ (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode ++ (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult ++ (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy ++ (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism ++ (*Status)(nil), // 8: s2a.proto.v2.Status ++ (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq ++ (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp ++ (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq ++ (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp ++ (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq ++ (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp ++ (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq ++ (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp ++ (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq ++ (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp ++ (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration ++ (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration ++ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer ++ (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer ++ (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol ++ (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity ++ 
(common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide ++ (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext ++ (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite ++} ++var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ ++ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol ++ 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity ++ 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide ++ 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration ++ 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration ++ 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation ++ 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm ++ 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation ++ 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode ++ 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer ++ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer ++ 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult ++ 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext ++ 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity ++ 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism ++ 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq ++ 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq ++ 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq ++ 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq ++ 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status ++ 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp ++ 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp ++ 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp ++ 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp ++ 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> 
s2a.proto.v2.TLSVersion ++ 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite ++ 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy ++ 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 27, // 29: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite ++ 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate ++ 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy ++ 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq ++ 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp ++ 34, // [34:35] is the sub-list for method output_type ++ 33, // [33:34] is the sub-list for method input_type ++ 33, // [33:33] is the sub-list for extension type_name ++ 33, // [33:33] is the sub-list for extension extendee ++ 0, // [0:33] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_s2a_s2a_proto_init() } ++func file_internal_proto_v2_s2a_s2a_proto_init() { ++ if File_internal_proto_v2_s2a_s2a_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AlpnPolicy); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AuthenticationMechanism); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*Status); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadPrivateKeyOperationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadPrivateKeyOperationResp); i { ++ case 0: ++ return 
&v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadResumptionKeyOperationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadResumptionKeyOperationResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ ++ (*AuthenticationMechanism_Token)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ ++ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), ++ (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ 
++ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ ++ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), ++ (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ ++ (*SessionReq_GetTlsConfigurationReq)(nil), ++ (*SessionReq_OffloadPrivateKeyOperationReq)(nil), ++ (*SessionReq_OffloadResumptionKeyOperationReq)(nil), ++ (*SessionReq_ValidatePeerCertificateChainReq)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ ++ (*SessionResp_GetTlsConfigurationResp)(nil), ++ (*SessionResp_OffloadPrivateKeyOperationResp)(nil), ++ (*SessionResp_OffloadResumptionKeyOperationResp)(nil), ++ (*SessionResp_ValidatePeerCertificateChainResp)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, ++ NumEnums: 6, ++ NumMessages: 17, ++ NumExtensions: 0, ++ NumServices: 1, ++ }, ++ GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, ++ EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, ++ MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, ++ }.Build() ++ File_internal_proto_v2_s2a_s2a_proto = out.File ++ file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil ++ file_internal_proto_v2_s2a_s2a_proto_goTypes = nil ++ file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +new file mode 100644 +index 00000000000..2566df6c304 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +@@ -0,0 +1,159 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go-grpc. DO NOT EDIT. ++// versions: ++// - protoc-gen-go-grpc v1.3.0 ++// - protoc v3.21.12 ++// source: internal/proto/v2/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ context "context" ++ grpc "google.golang.org/grpc" ++ codes "google.golang.org/grpc/codes" ++ status "google.golang.org/grpc/status" ++) ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++// Requires gRPC-Go v1.32.0 or later. ++const _ = grpc.SupportPackageIsVersion7 ++ ++const ( ++ S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" ++) ++ ++// S2AServiceClient is the client API for S2AService service. 
++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. ++type S2AServiceClient interface { ++ // SetUpSession is a bidirectional stream used by applications to offload ++ // operations from the TLS handshake. ++ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) ++} ++ ++type s2AServiceClient struct { ++ cc grpc.ClientConnInterface ++} ++ ++func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { ++ return &s2AServiceClient{cc} ++} ++ ++func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { ++ stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &s2AServiceSetUpSessionClient{stream} ++ return x, nil ++} ++ ++type S2AService_SetUpSessionClient interface { ++ Send(*SessionReq) error ++ Recv() (*SessionResp, error) ++ grpc.ClientStream ++} ++ ++type s2AServiceSetUpSessionClient struct { ++ grpc.ClientStream ++} ++ ++func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { ++ return x.ClientStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { ++ m := new(SessionResp) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AServiceServer is the server API for S2AService service. ++// All implementations must embed UnimplementedS2AServiceServer ++// for forward compatibility ++type S2AServiceServer interface { ++ // SetUpSession is a bidirectional stream used by applications to offload ++ // operations from the TLS handshake. ++ SetUpSession(S2AService_SetUpSessionServer) error ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. ++type UnimplementedS2AServiceServer struct { ++} ++ ++func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { ++ return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") ++} ++func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} ++ ++// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. ++// Use of this interface is not recommended, as added methods to S2AServiceServer will ++// result in compilation errors. 
++type UnsafeS2AServiceServer interface { ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { ++ s.RegisterService(&S2AService_ServiceDesc, srv) ++} ++ ++func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { ++ return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) ++} ++ ++type S2AService_SetUpSessionServer interface { ++ Send(*SessionResp) error ++ Recv() (*SessionReq, error) ++ grpc.ServerStream ++} ++ ++type s2AServiceSetUpSessionServer struct { ++ grpc.ServerStream ++} ++ ++func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { ++ m := new(SessionReq) ++ if err := x.ServerStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. ++// It's only intended for direct use with grpc.RegisterService, ++// and not to be introspected or modified (even as a copy) ++var S2AService_ServiceDesc = grpc.ServiceDesc{ ++ ServiceName: "s2a.proto.v2.S2AService", ++ HandlerType: (*S2AServiceServer)(nil), ++ Methods: []grpc.MethodDesc{}, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "SetUpSession", ++ Handler: _S2AService_SetUpSession_Handler, ++ ServerStreams: true, ++ ClientStreams: true, ++ }, ++ }, ++ Metadata: "internal/proto/v2/s2a/s2a.proto", ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +new file mode 100644 +index 00000000000..486f4ec4f2a +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +@@ -0,0 +1,34 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package aeadcrypter provides the interface for AEAD cipher implementations ++// used by S2A's record protocol. ++package aeadcrypter ++ ++// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record ++// protocol. ++type S2AAEADCrypter interface { ++ // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. ++ // dst and plaintext may fully overlap or not at all. ++ Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) ++ // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may ++ // fully overlap or not at all. ++ Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) ++ // TagSize returns the tag size in bytes. 
++ TagSize() int ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +new file mode 100644 +index 00000000000..85c4e595d75 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +@@ -0,0 +1,70 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/aes" ++ "crypto/cipher" ++ "fmt" ++) ++ ++// Supported key sizes in bytes. ++const ( ++ AES128GCMKeySize = 16 ++ AES256GCMKeySize = 32 ++) ++ ++// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. ++type aesgcm struct { ++ aead cipher.AEAD ++} ++ ++// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be ++// either 128 bits or 256 bits. ++func NewAESGCM(key []byte) (S2AAEADCrypter, error) { ++ if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { ++ return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) ++ } ++ c, err := aes.NewCipher(key) ++ if err != nil { ++ return nil, err ++ } ++ a, err := cipher.NewGCM(c) ++ if err != nil { ++ return nil, err ++ } ++ return &aesgcm{aead: a}, nil ++} ++ ++// Encrypt is the encryption function. dst can contain bytes at the beginning of ++// the ciphertext that will not be encrypted but will be authenticated. If dst ++// has enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ return encrypt(s.aead, dst, plaintext, nonce, aad) ++} ++ ++func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ return decrypt(s.aead, dst, ciphertext, nonce, aad) ++} ++ ++func (s *aesgcm) TagSize() int { ++ return TagSize ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +new file mode 100644 +index 00000000000..214df4ca415 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +@@ -0,0 +1,67 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/cipher" ++ "fmt" ++ ++ "golang.org/x/crypto/chacha20poly1305" ++) ++ ++// Supported key size in bytes. ++const ( ++ Chacha20Poly1305KeySize = 32 ++) ++ ++// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD ++// crypter. ++type chachapoly struct { ++ aead cipher.AEAD ++} ++ ++// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must ++// be Chacha20Poly1305KeySize bytes in length. ++func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { ++ if len(key) != Chacha20Poly1305KeySize { ++ return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) ++ } ++ c, err := chacha20poly1305.New(key) ++ if err != nil { ++ return nil, err ++ } ++ return &chachapoly{aead: c}, nil ++} ++ ++// Encrypt is the encryption function. dst can contain bytes at the beginning of ++// the ciphertext that will not be encrypted but will be authenticated. If dst ++// has enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ return encrypt(s.aead, dst, plaintext, nonce, aad) ++} ++ ++func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ return decrypt(s.aead, dst, ciphertext, nonce, aad) ++} ++ ++func (s *chachapoly) TagSize() int { ++ return TagSize ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +new file mode 100644 +index 00000000000..b3c36ad95dc +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +@@ -0,0 +1,92 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/cipher" ++ "fmt" ++) ++ ++const ( ++ // TagSize is the tag size in bytes for AES-128-GCM-SHA256, ++ // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. ++ TagSize = 16 ++ // NonceSize is the size of the nonce in number of bytes for ++ // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. ++ NonceSize = 12 ++ // SHA256DigestSize is the digest size of sha256 in bytes. ++ SHA256DigestSize = 32 ++ // SHA384DigestSize is the digest size of sha384 in bytes. ++ SHA384DigestSize = 48 ++) ++ ++// sliceForAppend takes a slice and a requested number of bytes. It returns a ++// slice with the contents of the given slice followed by that many bytes and a ++// second slice that aliases into it and contains only the extra bytes. If the ++// original slice has sufficient capacity then no allocation is performed. 
++func sliceForAppend(in []byte, n int) (head, tail []byte) { ++ if total := len(in) + n; cap(in) >= total { ++ head = in[:total] ++ } else { ++ head = make([]byte, total) ++ copy(head, in) ++ } ++ tail = head[len(in):] ++ return head, tail ++} ++ ++// encrypt is the encryption function for an AEAD crypter. aead determines ++// the type of AEAD crypter. dst can contain bytes at the beginning of the ++// ciphertext that will not be encrypted but will be authenticated. If dst has ++// enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) ++ } ++ // If we need to allocate an output buffer, we want to include space for ++ // the tag to avoid forcing the caller to reallocate as well. ++ dlen := len(dst) ++ dst, out := sliceForAppend(dst, len(plaintext)+TagSize) ++ data := out[:len(plaintext)] ++ copy(data, plaintext) // data may fully overlap plaintext ++ ++ // Seal appends the ciphertext and the tag to its first argument and ++ // returns the updated slice. However, sliceForAppend above ensures that ++ // dst has enough capacity to avoid a reallocation and copy due to the ++ // append. ++ dst = aead.Seal(dst[:dlen], nonce, data, aad) ++ return dst, nil ++} ++ ++// decrypt is the decryption function for an AEAD crypter, where aead determines ++// the type of AEAD crypter, and dst the destination bytes for the decrypted ++// ciphertext. The dst buffer may fully overlap with plaintext or not at all. ++func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) ++ } ++ // If dst is equal to ciphertext[:0], ciphertext storage is reused. ++ plaintext, err := aead.Open(dst, nonce, ciphertext, aad) ++ if err != nil { ++ return nil, fmt.Errorf("message auth failed: %v", err) ++ } ++ return plaintext, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +new file mode 100644 +index 00000000000..ddeaa6d77d7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +@@ -0,0 +1,98 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import ( ++ "crypto/sha256" ++ "crypto/sha512" ++ "fmt" ++ "hash" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/aeadcrypter" ++) ++ ++// ciphersuite is the interface for retrieving ciphersuite-specific information ++// and utilities. 
++type ciphersuite interface { ++ // keySize returns the key size in bytes. This refers to the key used by ++ // the AEAD crypter. This is derived by calling HKDF expand on the traffic ++ // secret. ++ keySize() int ++ // nonceSize returns the nonce size in bytes. ++ nonceSize() int ++ // trafficSecretSize returns the traffic secret size in bytes. This refers ++ // to the secret used to derive the traffic key and nonce, as specified in ++ // https://tools.ietf.org/html/rfc8446#section-7. ++ trafficSecretSize() int ++ // hashFunction returns the hash function for the ciphersuite. ++ hashFunction() func() hash.Hash ++ // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite ++ // using that key. ++ aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) ++} ++ ++func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { ++ switch ciphersuite { ++ case s2apb.Ciphersuite_AES_128_GCM_SHA256: ++ return &aesgcm128sha256{}, nil ++ case s2apb.Ciphersuite_AES_256_GCM_SHA384: ++ return &aesgcm256sha384{}, nil ++ case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: ++ return &chachapolysha256{}, nil ++ default: ++ return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) ++ } ++} ++ ++// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite ++// interface. ++type aesgcm128sha256 struct{} ++ ++func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } ++func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } ++func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } ++func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } ++func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewAESGCM(key) ++} ++ ++// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite ++// interface. ++type aesgcm256sha384 struct{} ++ ++func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } ++func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } ++func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } ++func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } ++func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewAESGCM(key) ++} ++ ++// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite ++// interface. ++type chachapolysha256 struct{} ++ ++func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } ++func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } ++func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } ++func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } ++func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewChachaPoly(key) ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +new file mode 100644 +index 00000000000..9499cdca759 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +@@ -0,0 +1,60 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import "errors" ++ ++// counter is a 64-bit counter. ++type counter struct { ++ val uint64 ++ hasOverflowed bool ++} ++ ++// newCounter creates a new counter with the initial value set to val. ++func newCounter(val uint64) counter { ++ return counter{val: val} ++} ++ ++// value returns the current value of the counter. ++func (c *counter) value() (uint64, error) { ++ if c.hasOverflowed { ++ return 0, errors.New("counter has overflowed") ++ } ++ return c.val, nil ++} ++ ++// increment increments the counter and checks for overflow. ++func (c *counter) increment() { ++ // If the counter is already invalid due to overflow, there is no need to ++ // increase it. We check for the hasOverflowed flag in the call to value(). ++ if c.hasOverflowed { ++ return ++ } ++ c.val++ ++ if c.val == 0 { ++ c.hasOverflowed = true ++ } ++} ++ ++// reset sets the counter value to zero and sets the hasOverflowed flag to ++// false. ++func (c *counter) reset() { ++ c.val = 0 ++ c.hasOverflowed = false ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +new file mode 100644 +index 00000000000..e05f2c36a6d +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +@@ -0,0 +1,59 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import ( ++ "fmt" ++ "hash" ++ ++ "golang.org/x/crypto/hkdf" ++) ++ ++// hkdfExpander is the interface for the HKDF expansion function; see ++// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is ++// specified in https://tools.ietf.org/html/rfc8446#section-7.2 ++type hkdfExpander interface { ++ // expand takes a secret, a label, and the output length in bytes, and ++ // returns the resulting expanded key. ++ expand(secret, label []byte, length int) ([]byte, error) ++} ++ ++// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf ++// for HKDF expansion. ++type defaultHKDFExpander struct { ++ h func() hash.Hash ++} ++ ++// newDefaultHKDFExpander creates an instance of the default HKDF expander ++// using the given hash function. 
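++// For example, an AES-128-GCM-SHA256 half connection is built with
++// newDefaultHKDFExpander(sha256.New) and derives its 16-byte record key via
++// expand(trafficSecret, hkdfLabel, 16), where hkdfLabel is the
++// HKDF-Expand-Label encoding assembled by deriveSecret in halfconn.go.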
++func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { ++ return &defaultHKDFExpander{h: h} ++} ++ ++func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { ++ outBuf := make([]byte, length) ++ n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) ++ if err != nil { ++ return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) ++ } ++ if n < length { ++ return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) ++ } ++ return outBuf, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +new file mode 100644 +index 00000000000..dff99ff5940 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +@@ -0,0 +1,193 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 ++// connection. ++package halfconn ++ ++import ( ++ "fmt" ++ "sync" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/aeadcrypter" ++ "golang.org/x/crypto/cryptobyte" ++) ++ ++// The constants below were taken from Section 7.2 and 7.3 in ++// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label ++// in HKDF-Expand-Label. ++const ( ++ tls13Key = "tls13 key" ++ tls13Nonce = "tls13 iv" ++ tls13Update = "tls13 traffic upd" ++) ++ ++// S2AHalfConnection stores the state of the TLS 1.3 connection in the ++// inbound or outbound direction. ++type S2AHalfConnection struct { ++ cs ciphersuite ++ expander hkdfExpander ++ // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. ++ mutex sync.Mutex ++ aeadCrypter aeadcrypter.S2AAEADCrypter ++ sequence counter ++ trafficSecret []byte ++ nonce []byte ++} ++ ++// New creates a new instance of S2AHalfConnection given a ciphersuite and a ++// traffic secret. ++func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { ++ cs, err := newCiphersuite(ciphersuite) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) ++ } ++ if cs.trafficSecretSize() != len(trafficSecret) { ++ return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) ++ } ++ ++ hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} ++ if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { ++ return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) ++ } ++ ++ return hc, nil ++} ++ ++// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. ++// dst and plaintext may fully overlap or not at all. 
Note that the sequence ++// number will still be incremented on failure, unless the sequence has ++// overflowed. ++func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { ++ hc.mutex.Lock() ++ sequence, err := hc.getAndIncrementSequence() ++ if err != nil { ++ hc.mutex.Unlock() ++ return nil, err ++ } ++ nonce := hc.maskedNonce(sequence) ++ crypter := hc.aeadCrypter ++ hc.mutex.Unlock() ++ return crypter.Encrypt(dst, plaintext, nonce, aad) ++} ++ ++// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may ++// fully overlap or not at all. Note that the sequence number will still be ++// incremented on failure, unless the sequence has overflowed. ++func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { ++ hc.mutex.Lock() ++ sequence, err := hc.getAndIncrementSequence() ++ if err != nil { ++ hc.mutex.Unlock() ++ return nil, err ++ } ++ nonce := hc.maskedNonce(sequence) ++ crypter := hc.aeadCrypter ++ hc.mutex.Unlock() ++ return crypter.Decrypt(dst, ciphertext, nonce, aad) ++} ++ ++// UpdateKey advances the traffic secret key, as specified in ++// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives ++// a new key and nonce, and resets the sequence number. ++func (hc *S2AHalfConnection) UpdateKey() error { ++ hc.mutex.Lock() ++ defer hc.mutex.Unlock() ++ ++ var err error ++ hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) ++ if err != nil { ++ return fmt.Errorf("failed to derive traffic secret: %v", err) ++ } ++ ++ if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { ++ return fmt.Errorf("failed to update half connection: %v", err) ++ } ++ ++ hc.sequence.reset() ++ return nil ++} ++ ++// TagSize returns the tag size in bytes of the underlying AEAD crypter. ++func (hc *S2AHalfConnection) TagSize() int { ++ return hc.aeadCrypter.TagSize() ++} ++ ++// updateCrypterAndNonce takes a new traffic secret and updates the crypter ++// and nonce. Note that the mutex must be held while calling this function. ++func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { ++ key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) ++ if err != nil { ++ return fmt.Errorf("failed to update key: %v", err) ++ } ++ ++ hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) ++ if err != nil { ++ return fmt.Errorf("failed to update nonce: %v", err) ++ } ++ ++ hc.aeadCrypter, err = hc.cs.aeadCrypter(key) ++ if err != nil { ++ return fmt.Errorf("failed to update AEAD crypter: %v", err) ++ } ++ return nil ++} ++ ++// getAndIncrement returns the current sequence number and increments it. Note ++// that the mutex must be held while calling this function. ++func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { ++ sequence, err := hc.sequence.value() ++ if err != nil { ++ return 0, err ++ } ++ hc.sequence.increment() ++ return sequence, nil ++} ++ ++// maskedNonce creates a copy of the nonce that is masked with the sequence ++// number. Note that the mutex must be held while calling this function. 
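++// For example, with the 12-byte NonceSize and sequence == 1, bytes 4 through
++// 11 of the copy are XORed with the big-endian encoding
++// 00 00 00 00 00 00 00 01, so only the final byte changes; this is the
++// per-record nonce construction of RFC 8446, Section 5.3.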
++func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { ++ const uint64Size = 8 ++ nonce := make([]byte, len(hc.nonce)) ++ copy(nonce, hc.nonce) ++ for i := 0; i < uint64Size; i++ { ++ nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) ++ } ++ return nonce ++} ++ ++// deriveSecret implements the Derive-Secret function, as specified in ++// https://tools.ietf.org/html/rfc8446#section-7.1. ++func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { ++ var hkdfLabel cryptobyte.Builder ++ hkdfLabel.AddUint16(uint16(length)) ++ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { ++ b.AddBytes(label) ++ }) ++ // Append an empty `Context` field to the label, as specified in the RFC. ++ // The half connection does not use the `Context` field. ++ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { ++ b.AddBytes([]byte("")) ++ }) ++ hkdfLabelBytes, err := hkdfLabel.Bytes() ++ if err != nil { ++ return nil, fmt.Errorf("deriveSecret failed: %v", err) ++ } ++ return hc.expander.expand(secret, hkdfLabelBytes, length) ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go +new file mode 100644 +index 00000000000..c60515510a7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/record.go +@@ -0,0 +1,757 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package record implements the TLS 1.3 record protocol used by the S2A ++// transport credentials. ++package record ++ ++import ( ++ "encoding/binary" ++ "errors" ++ "fmt" ++ "math" ++ "net" ++ "sync" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/halfconn" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// recordType is the `ContentType` as described in ++// https://tools.ietf.org/html/rfc8446#section-5.1. ++type recordType byte ++ ++const ( ++ alert recordType = 21 ++ handshake recordType = 22 ++ applicationData recordType = 23 ++) ++ ++// keyUpdateRequest is the `KeyUpdateRequest` as described in ++// https://tools.ietf.org/html/rfc8446#section-4.6.3. ++type keyUpdateRequest byte ++ ++const ( ++ updateNotRequested keyUpdateRequest = 0 ++ updateRequested keyUpdateRequest = 1 ++) ++ ++// alertDescription is the `AlertDescription` as described in ++// https://tools.ietf.org/html/rfc8446#section-6. ++type alertDescription byte ++ ++const ( ++ closeNotify alertDescription = 0 ++) ++ ++// sessionTicketState is used to determine whether session tickets have not yet ++// been received, are in the process of being received, or have finished ++// receiving. 
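++// The flow, as implemented in Read and handleHandshakeMessage, is
++// ticketsNotYetReceived -> receivingTickets on the first NewSessionTicket
++// message, then notReceivingTickets once maxAllowedTickets tickets have been
++// collected or application data arrives, at which point the batch is sent to
++// S2A.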
++type sessionTicketState byte ++ ++const ( ++ ticketsNotYetReceived sessionTicketState = 0 ++ receivingTickets sessionTicketState = 1 ++ notReceivingTickets sessionTicketState = 2 ++) ++ ++const ( ++ // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, ++ // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from ++ // https://tools.ietf.org/html/rfc8446#section-5.1. ++ ++ // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext ++ // in a single TLS 1.3 record. ++ tlsRecordMaxPlaintextSize = 16384 // 2^14 ++ // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. ++ tlsRecordTypeSize = 1 ++ // tlsTagSize is the size in bytes of the tag of the following three ++ // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, ++ // CHACHA20-POLY1305-SHA256. ++ tlsTagSize = 16 ++ // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a ++ // single TLS 1.3 record. This is the maximum size of the plaintext plus the ++ // record type byte and 16 bytes of the tag. ++ tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize ++ // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record ++ // header type. ++ tlsRecordHeaderTypeSize = 1 ++ // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS ++ // 1.3 record header legacy record version. ++ tlsRecordHeaderLegacyRecordVersionSize = 2 ++ // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 ++ // record header payload length. ++ tlsRecordHeaderPayloadLengthSize = 2 ++ // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. ++ tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize ++ // tlsRecordMaxSize ++ tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize ++ // tlsApplicationData is the application data type of the TLS 1.3 record ++ // header. ++ tlsApplicationData = 23 ++ // tlsLegacyRecordVersion is the legacy record version of the TLS record. ++ tlsLegacyRecordVersion = 3 ++ // tlsAlertSize is the size in bytes of an alert of TLS 1.3. ++ tlsAlertSize = 2 ++) ++ ++const ( ++ // These are TLS 1.3 handshake-specific constants. ++ ++ // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session ++ // ticket message of TLS 1.3. ++ tlsHandshakeNewSessionTicketType = 4 ++ // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message ++ // of TLS 1.3. ++ tlsHandshakeKeyUpdateType = 24 ++ // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake ++ // message type field. ++ tlsHandshakeMsgTypeSize = 1 ++ // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake ++ // message length field. ++ tlsHandshakeLengthSize = 3 ++ // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3 ++ // handshake key update message. ++ tlsHandshakeKeyUpdateMsgSize = 1 ++ // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3 ++ // handshake message. ++ tlsHandshakePrefixSize = 4 ++ // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message ++ // in TLS 1.3. This is the sum of the max sizes of all the fields in the ++ // NewSessionTicket struct specified in ++ // https://tools.ietf.org/html/rfc8446#section-4.6.1. ++ tlsMaxSessionTicketSize = 131338 ++) ++ ++const ( ++ // outBufMaxRecords is the maximum number of records that can fit in the ++ // ourRecordsBuf buffer. 
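++	// (Concretely, tlsRecordMaxSize works out to 5 + 16384 + 1 + 16 = 16406
++	// bytes, so outBufMaxSize below is 16 * 16406 = 262496 bytes, roughly
++	// 256 KiB.)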
++ outBufMaxRecords = 16 ++ // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer. ++ outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize ++ // maxAllowedTickets is the maximum number of session tickets that are ++ // allowed. The number of tickets are limited to ensure that the size of the ++ // ticket queue does not grow indefinitely. S2A also keeps a limit on the ++ // number of tickets that it caches. ++ maxAllowedTickets = 5 ++) ++ ++// preConstructedKeyUpdateMsg holds the key update message. This is needed as an ++// optimization so that the same message does not need to be constructed every ++// time a key update message is sent. ++var preConstructedKeyUpdateMsg = buildKeyUpdateRequest() ++ ++// conn represents a secured TLS connection. It implements the net.Conn ++// interface. ++type conn struct { ++ net.Conn ++ // inConn is the half connection responsible for decrypting incoming bytes. ++ inConn *halfconn.S2AHalfConnection ++ // outConn is the half connection responsible for encrypting outgoing bytes. ++ outConn *halfconn.S2AHalfConnection ++ // pendingApplicationData holds data that has been read from the connection ++ // and decrypted, but has not yet been returned by Read. ++ pendingApplicationData []byte ++ // unusedBuf holds data read from the network that has not yet been ++ // decrypted. This data might not consist of a complete record. It may ++ // consist of several records, the last of which could be incomplete. ++ unusedBuf []byte ++ // outRecordsBuf is a buffer used to store outgoing TLS records before ++ // they are written to the network. ++ outRecordsBuf []byte ++ // nextRecord stores the next record info in the unusedBuf buffer. ++ nextRecord []byte ++ // overheadSize is the overhead size in bytes of each TLS 1.3 record, which ++ // is computed as overheadSize = header size + record type byte + tag size. ++ // Note that there is no padding by zeros in the overhead calculation. ++ overheadSize int ++ // readMutex guards against concurrent calls to Read. This is required since ++ // Close may be called during a Read. ++ readMutex sync.Mutex ++ // writeMutex guards against concurrent calls to Write. This is required ++ // since Close may be called during a Write, and also because a key update ++ // message may be written during a Read. ++ writeMutex sync.Mutex ++ // handshakeBuf holds handshake messages while they are being processed. ++ handshakeBuf []byte ++ // ticketState is the current processing state of the session tickets. ++ ticketState sessionTicketState ++ // sessionTickets holds the completed session tickets until they are sent to ++ // the handshaker service for processing. ++ sessionTickets [][]byte ++ // ticketSender sends session tickets to the S2A handshaker service. ++ ticketSender s2aTicketSender ++ // callComplete is a channel that blocks closing the record protocol until a ++ // pending call to the S2A completes. ++ callComplete chan bool ++} ++ ++// ConnParameters holds the parameters used for creating a new conn object. ++type ConnParameters struct { ++ // NetConn is the TCP connection to the peer. This parameter is required. ++ NetConn net.Conn ++ // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker ++ // service. This parameter is required. ++ Ciphersuite commonpb.Ciphersuite ++ // TLSVersion is the TLS version number negotiated by the S2A handshaker ++ // service. This parameter is required. 
++ TLSVersion commonpb.TLSVersion ++ // InTrafficSecret is the traffic secret used to derive the session key for ++ // the inbound direction. This parameter is required. ++ InTrafficSecret []byte ++ // OutTrafficSecret is the traffic secret used to derive the session key ++ // for the outbound direction. This parameter is required. ++ OutTrafficSecret []byte ++ // UnusedBuf is the data read from the network that has not yet been ++ // decrypted. This parameter is optional. If not provided, then no ++ // application data was sent in the same flight of messages as the final ++ // handshake message. ++ UnusedBuf []byte ++ // InSequence is the sequence number of the next, incoming, TLS record. ++ // This parameter is required. ++ InSequence uint64 ++ // OutSequence is the sequence number of the next, outgoing, TLS record. ++ // This parameter is required. ++ OutSequence uint64 ++ // HSAddr stores the address of the S2A handshaker service. This parameter ++ // is optional. If not provided, then TLS resumption is disabled. ++ HSAddr string ++ // ConnectionId is the connection identifier that was created and sent by ++ // S2A at the end of a handshake. ++ ConnectionID uint64 ++ // LocalIdentity is the local identity that was used by S2A during session ++ // setup and included in the session result. ++ LocalIdentity *commonpb.Identity ++ // EnsureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ EnsureProcessSessionTickets *sync.WaitGroup ++} ++ ++// NewConn creates a TLS record protocol that wraps the TCP connection. ++func NewConn(o *ConnParameters) (net.Conn, error) { ++ if o == nil { ++ return nil, errors.New("conn options must not be nil") ++ } ++ if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { ++ return nil, errors.New("TLS version must be TLS 1.3") ++ } ++ ++ inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create inbound half connection: %v", err) ++ } ++ outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create outbound half connection: %v", err) ++ } ++ ++ // The tag size for the in/out connections should be the same. ++ overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() ++ var unusedBuf []byte ++ if o.UnusedBuf == nil { ++ // We pre-allocate unusedBuf to be of size ++ // 2*tlsRecordMaxSize-1 during initialization. We only read from the ++ // network into unusedBuf when unusedBuf does not contain a complete ++ // record and the incomplete record is at most tlsRecordMaxSize-1 ++ // (bytes). And we read at most tlsRecordMaxSize bytes of data from the ++ // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 ++ // is large enough to buffer data read from the network. 
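++		// (With tlsRecordMaxSize = 16406 bytes, this pre-allocates a capacity
++		// of 2*16406-1 = 32811 bytes.)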
++ unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) ++ } else { ++ unusedBuf = make([]byte, len(o.UnusedBuf)) ++ copy(unusedBuf, o.UnusedBuf) ++ } ++ ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ ++ s2aConn := &conn{ ++ Conn: o.NetConn, ++ inConn: inConn, ++ outConn: outConn, ++ unusedBuf: unusedBuf, ++ outRecordsBuf: make([]byte, tlsRecordMaxSize), ++ nextRecord: unusedBuf, ++ overheadSize: overheadSize, ++ ticketState: ticketsNotYetReceived, ++ // Pre-allocate the buffer for one session ticket message and the max ++ // plaintext size. This is the largest size that handshakeBuf will need ++ // to hold. The largest incomplete handshake message is the ++ // [handshake header size] + [max session ticket size] - 1. ++ // Then, tlsRecordMaxPlaintextSize is the maximum size that will be ++ // appended to the handshakeBuf before the handshake message is ++ // completed. Therefore, the buffer size below should be large enough to ++ // buffer any handshake messages. ++ handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), ++ ticketSender: &ticketSender{ ++ hsAddr: o.HSAddr, ++ connectionID: o.ConnectionID, ++ localIdentity: o.LocalIdentity, ++ tokenManager: tokenManager, ++ ensureProcessSessionTickets: o.EnsureProcessSessionTickets, ++ }, ++ callComplete: make(chan bool), ++ } ++ return s2aConn, nil ++} ++ ++// Read reads and decrypts a TLS 1.3 record from the underlying connection, and ++// copies any application data received from the peer into b. If the size of the ++// payload is greater than len(b), Read retains the remaining bytes in an ++// internal buffer, and subsequent calls to Read will read from this buffer ++// until it is exhausted. At most 1 TLS record worth of application data is ++// written to b for each call to Read. ++// ++// Note that for the user to efficiently call this method, the user should ++// ensure that the buffer b is allocated such that the buffer does not have any ++// unused segments. This can be done by calling Read via io.ReadFull, which ++// continually calls Read until the specified buffer has been filled. Also note ++// that the user should close the connection via Close() if an error is thrown ++// by a call to Read. ++func (p *conn) Read(b []byte) (n int, err error) { ++ p.readMutex.Lock() ++ defer p.readMutex.Unlock() ++ // Check if p.pendingApplication data has leftover application data from ++ // the previous call to Read. ++ if len(p.pendingApplicationData) == 0 { ++ // Read a full record from the wire. ++ record, err := p.readFullRecord() ++ if err != nil { ++ return 0, err ++ } ++ // Now we have a complete record, so split the header and validate it ++ // The TLS record is split into 2 pieces: the record header and the ++ // payload. The payload has the following form: ++ // [payload] = [ciphertext of application data] ++ // + [ciphertext of record type byte] ++ // + [(optionally) ciphertext of padding by zeros] ++ // + [tag] ++ header, payload, err := splitAndValidateHeader(record) ++ if err != nil { ++ return 0, err ++ } ++ // Decrypt the ciphertext. ++ p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) ++ if err != nil { ++ return 0, err ++ } ++ // Remove the padding by zeros and the record type byte from the ++ // p.pendingApplicationData buffer. 
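++		// For example, a decrypted payload of []byte{'h', 'i', 23, 0, 0} leaves
++		// p.pendingApplicationData equal to []byte{'h', 'i'} with msgType equal
++		// to applicationData (23) once the trailing zeros and the type byte are
++		// stripped.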
++ msgType, err := p.stripPaddingAndType() ++ if err != nil { ++ return 0, err ++ } ++ // Check that the length of the plaintext after stripping the padding ++ // and record type byte is under the maximum plaintext size. ++ if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { ++ return 0, errors.New("plaintext size larger than maximum") ++ } ++ // The expected message types are application data, alert, and ++ // handshake. For application data, the bytes are directly copied into ++ // b. For an alert, the type of the alert is checked and the connection ++ // is closed on a close notify alert. For a handshake message, the ++ // handshake message type is checked. The handshake message type can be ++ // a key update type, for which we advance the traffic secret, and a ++ // new session ticket type, for which we send the received ticket to S2A ++ // for processing. ++ switch msgType { ++ case applicationData: ++ if len(p.handshakeBuf) > 0 { ++ return 0, errors.New("application data received while processing fragmented handshake messages") ++ } ++ if p.ticketState == receivingTickets { ++ p.ticketState = notReceivingTickets ++ grpclog.Infof("Sending session tickets to S2A.") ++ p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) ++ } ++ case alert: ++ return 0, p.handleAlertMessage() ++ case handshake: ++ if err = p.handleHandshakeMessage(); err != nil { ++ return 0, err ++ } ++ return 0, nil ++ default: ++ return 0, errors.New("unknown record type") ++ } ++ } ++ // Write as much application data as possible to b, the output buffer. ++ n = copy(b, p.pendingApplicationData) ++ p.pendingApplicationData = p.pendingApplicationData[n:] ++ return n, nil ++} ++ ++// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a ++// TLS 1.3 record (of type "application data") from each segment, and sends ++// the record to the peer. It returns the number of plaintext bytes that were ++// successfully sent to the peer. ++func (p *conn) Write(b []byte) (n int, err error) { ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ return p.writeTLSRecord(b, tlsApplicationData) ++} ++ ++// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, ++// builds a TLS 1.3 record (of type recordType) from each segment, and sends ++// the record to the peer. It returns the number of plaintext bytes that were ++// successfully sent to the peer. ++func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { ++ // Create a record of only header, record type, and tag if given empty ++ // byte array. ++ if len(b) == 0 { ++ recordEndIndex, _, err := p.buildRecord(b, recordType, 0) ++ if err != nil { ++ return 0, err ++ } ++ ++ // Write the bytes stored in outRecordsBuf to p.Conn. Since we return ++ // the number of plaintext bytes written without overhead, we will ++ // always return 0 while p.Conn.Write returns the entire record length. 
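++		// For example, such an empty record occupies 5 + 1 + 16 = 22 bytes on
++		// the wire: header, record type byte, and tag.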
++ _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) ++ return 0, err ++ } ++ ++ numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) ++ totalRecordsSize := len(b) + numRecords*p.overheadSize ++ partialBSize := len(b) ++ if totalRecordsSize > outBufMaxSize { ++ totalRecordsSize = outBufMaxSize ++ partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize ++ } ++ if len(p.outRecordsBuf) < totalRecordsSize { ++ p.outRecordsBuf = make([]byte, totalRecordsSize) ++ } ++ for bStart := 0; bStart < len(b); bStart += partialBSize { ++ bEnd := bStart + partialBSize ++ if bEnd > len(b) { ++ bEnd = len(b) ++ } ++ partialB := b[bStart:bEnd] ++ recordEndIndex := 0 ++ for len(partialB) > 0 { ++ recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) ++ if err != nil { ++ // Return the amount of bytes written prior to the error. ++ return bStart, err ++ } ++ } ++ // Write the bytes stored in outRecordsBuf to p.Conn. If there is an ++ // error, calculate the total number of plaintext bytes of complete ++ // records successfully written to the peer and return it. ++ nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) ++ if err != nil { ++ numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) ++ return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err ++ } ++ } ++ return len(b), nil ++} ++ ++// buildRecord builds a TLS 1.3 record of type recordType from plaintext, ++// and writes the record to outRecordsBuf at recordStartIndex. The record will ++// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the ++// index of outRecordsBuf where the current record ends, as well as any ++// remaining plaintext bytes. ++func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { ++ // Construct the payload, which consists of application data and record type. ++ dataLen := len(plaintext) ++ if dataLen > tlsRecordMaxPlaintextSize { ++ dataLen = tlsRecordMaxPlaintextSize ++ } ++ remainingPlaintext = plaintext[dataLen:] ++ newRecordBuf := p.outRecordsBuf[recordStartIndex:] ++ ++ copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) ++ newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType ++ payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. ++ // Construct the header. ++ newRecordBuf[0] = tlsApplicationData ++ newRecordBuf[1] = tlsLegacyRecordVersion ++ newRecordBuf[2] = tlsLegacyRecordVersion ++ binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) ++ header := newRecordBuf[:tlsRecordHeaderSize] ++ ++ // Encrypt the payload using header as aad. ++ encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) ++ if err != nil { ++ return 0, plaintext, err ++ } ++ recordStartIndex += len(header) + len(encryptedPayload) ++ return recordStartIndex, remainingPlaintext, nil ++} ++ ++func (p *conn) Close() error { ++ p.readMutex.Lock() ++ defer p.readMutex.Unlock() ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ // If p.ticketState is equal to notReceivingTickets, then S2A has ++ // been sent a flight of session tickets, and we must wait for the ++ // call to S2A to complete before closing the record protocol. 
++ if p.ticketState == notReceivingTickets { ++ <-p.callComplete ++ grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") ++ } ++ return p.Conn.Close() ++} ++ ++// stripPaddingAndType strips the padding by zeros and record type from ++// p.pendingApplicationData and returns the record type. Note that ++// p.pendingApplicationData should be of the form: ++// [application data] + [record type byte] + [trailing zeros] ++func (p *conn) stripPaddingAndType() (recordType, error) { ++ if len(p.pendingApplicationData) == 0 { ++ return 0, errors.New("application data had length 0") ++ } ++ i := len(p.pendingApplicationData) - 1 ++ // Search for the index of the record type byte. ++ for i > 0 { ++ if p.pendingApplicationData[i] != 0 { ++ break ++ } ++ i-- ++ } ++ rt := recordType(p.pendingApplicationData[i]) ++ p.pendingApplicationData = p.pendingApplicationData[:i] ++ return rt, nil ++} ++ ++// readFullRecord reads from the wire until a record is completed and returns ++// the full record. ++func (p *conn) readFullRecord() (fullRecord []byte, err error) { ++ fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) ++ if err != nil { ++ return nil, err ++ } ++ // Check whether the next record to be decrypted has been completely ++ // received. ++ if len(fullRecord) == 0 { ++ copy(p.unusedBuf, p.nextRecord) ++ p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] ++ // Always copy next incomplete record to the beginning of the ++ // unusedBuf buffer and reset nextRecord to it. ++ p.nextRecord = p.unusedBuf ++ } ++ // Keep reading from the wire until we have a complete record. ++ for len(fullRecord) == 0 { ++ if len(p.unusedBuf) == cap(p.unusedBuf) { ++ tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) ++ copy(tmp, p.unusedBuf) ++ p.unusedBuf = tmp ++ } ++ n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) ++ if err != nil { ++ return nil, err ++ } ++ p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] ++ fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) ++ if err != nil { ++ return nil, err ++ } ++ } ++ return fullRecord, nil ++} ++ ++// parseReadBuffer parses the provided buffer and returns a full record and any ++// remaining bytes in that buffer. If the record is incomplete, nil is returned ++// for the first return value and the given byte buffer is returned for the ++// second return value. The length of the payload specified by the header should ++// not be greater than maxLen, otherwise an error is returned. Note that this ++// function does not allocate or copy any buffers. ++func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { ++ // If the header is not complete, return the provided buffer as remaining ++ // buffer. ++ if len(b) < tlsRecordHeaderSize { ++ return nil, b, nil ++ } ++ msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize] ++ length := binary.BigEndian.Uint16(msgLenField) ++ if length > maxLen { ++ return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen) ++ } ++ if len(b) < int(length)+tlsRecordHeaderSize { ++ // Record is not complete yet. ++ return nil, b, nil ++ } ++ return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil ++} ++ ++// splitAndValidateHeader splits the header from the payload in the TLS 1.3 ++// record and returns them. 
Note that the header is checked for validity, and an ++// error is returned when an invalid header is parsed. Also note that this ++// function does not allocate or copy any buffers. ++func splitAndValidateHeader(record []byte) (header, payload []byte, err error) { ++ if len(record) < tlsRecordHeaderSize { ++ return nil, nil, fmt.Errorf("record was smaller than the header size") ++ } ++ header = record[:tlsRecordHeaderSize] ++ payload = record[tlsRecordHeaderSize:] ++ if header[0] != tlsApplicationData { ++ return nil, nil, fmt.Errorf("incorrect type in the header") ++ } ++ // Check the legacy record version, which should be 0x03, 0x03. ++ if header[1] != 0x03 || header[2] != 0x03 { ++ return nil, nil, fmt.Errorf("incorrect legacy record version in the header") ++ } ++ return header, payload, nil ++} ++ ++// handleAlertMessage handles an alert message. ++func (p *conn) handleAlertMessage() error { ++ if len(p.pendingApplicationData) != tlsAlertSize { ++ return errors.New("invalid alert message size") ++ } ++ alertType := p.pendingApplicationData[1] ++ // Clear the body of the alert message. ++ p.pendingApplicationData = p.pendingApplicationData[:0] ++ if alertType == byte(closeNotify) { ++ return errors.New("received a close notify alert") ++ } ++ // TODO(matthewstevenson88): Add support for more alert types. ++ return fmt.Errorf("received an unrecognized alert type: %v", alertType) ++} ++ ++// parseHandshakeHeader parses a handshake message from the handshake buffer. ++// It returns the message type, the message length, the message, the raw message ++// that includes the type and length bytes and a flag indicating whether the ++// handshake message has been fully parsed. i.e. whether the entire handshake ++// message was in the handshake buffer. ++func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) { ++ // Handle the case where the 4 byte handshake header is fragmented. ++ if len(p.handshakeBuf) < tlsHandshakePrefixSize { ++ return 0, 0, nil, nil, false ++ } ++ msgType = p.handshakeBuf[0] ++ msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize]) ++ if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) { ++ return 0, 0, nil, nil, false ++ } ++ msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen] ++ rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen] ++ p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:] ++ return msgType, msgLen, msg, rawMsg, true ++} ++ ++// handleHandshakeMessage handles a handshake message. Note that the first ++// complete handshake message from the handshake buffer is removed, if it ++// exists. ++func (p *conn) handleHandshakeMessage() error { ++ // Copy the pending application data to the handshake buffer. At this point, ++ // we are guaranteed that the pending application data contains only parts ++ // of a handshake message. ++ p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...) ++ p.pendingApplicationData = p.pendingApplicationData[:0] ++ // Several handshake messages may be coalesced into a single record. ++ // Continue reading them until the handshake buffer is empty. ++ for len(p.handshakeBuf) > 0 { ++ handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() ++ if !ok { ++ // The handshake could not be fully parsed, so read in another ++ // record and try again later. 
++ break ++ } ++ switch handshakeMsgType { ++ case tlsHandshakeKeyUpdateType: ++ if msgLen != tlsHandshakeKeyUpdateMsgSize { ++ return errors.New("invalid handshake key update message length") ++ } ++ if len(p.handshakeBuf) != 0 { ++ return errors.New("key update message must be the last message of a handshake record") ++ } ++ if err := p.handleKeyUpdateMsg(msg); err != nil { ++ return err ++ } ++ case tlsHandshakeNewSessionTicketType: ++ // Ignore tickets that are received after a batch of tickets has ++ // been sent to S2A. ++ if p.ticketState == notReceivingTickets { ++ continue ++ } ++ if p.ticketState == ticketsNotYetReceived { ++ p.ticketState = receivingTickets ++ } ++ p.sessionTickets = append(p.sessionTickets, rawMsg) ++ if len(p.sessionTickets) == maxAllowedTickets { ++ p.ticketState = notReceivingTickets ++ grpclog.Infof("Sending session tickets to S2A.") ++ p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) ++ } ++ default: ++ return errors.New("unknown handshake message type") ++ } ++ } ++ return nil ++} ++ ++func buildKeyUpdateRequest() []byte { ++ b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize) ++ b[0] = tlsHandshakeKeyUpdateType ++ b[1] = 0 ++ b[2] = 0 ++ b[3] = tlsHandshakeKeyUpdateMsgSize ++ b[4] = byte(updateNotRequested) ++ return b ++} ++ ++// handleKeyUpdateMsg handles a key update message. ++func (p *conn) handleKeyUpdateMsg(msg []byte) error { ++ keyUpdateRequest := msg[0] ++ if keyUpdateRequest != byte(updateNotRequested) && ++ keyUpdateRequest != byte(updateRequested) { ++ return errors.New("invalid handshake key update message") ++ } ++ if err := p.inConn.UpdateKey(); err != nil { ++ return err ++ } ++ // Send a key update message back to the peer if requested. ++ if keyUpdateRequest == byte(updateRequested) { ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake)) ++ if err != nil { ++ return err ++ } ++ if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize { ++ return errors.New("key update request message wrote less bytes than expected") ++ } ++ if err = p.outConn.UpdateKey(); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// bidEndianInt24 converts the given byte buffer of at least size 3 and ++// outputs the resulting 24 bit integer as a uint32. This is needed because ++// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does ++// not provide a way to transform a byte buffer into a 3 byte integer. ++func bigEndianInt24(b []byte) uint32 { ++ _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 ++ return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 ++} ++ ++func min(a, b int) int { ++ if a < b { ++ return a ++ } ++ return b ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +new file mode 100644 +index 00000000000..33fa3c55d47 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +@@ -0,0 +1,176 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package record ++ ++import ( ++ "context" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/google/s2a-go/internal/handshaker/service" ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// sessionTimeout is the timeout for creating a session with the S2A handshaker ++// service. ++const sessionTimeout = time.Second * 5 ++ ++// s2aTicketSender sends session tickets to the S2A handshaker service. ++type s2aTicketSender interface { ++ // sendTicketsToS2A sends the given session tickets to the S2A handshaker ++ // service. ++ sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) ++} ++ ++// ticketStream is the stream used to send and receive session information. ++type ticketStream interface { ++ Send(*s2apb.SessionReq) error ++ Recv() (*s2apb.SessionResp, error) ++} ++ ++type ticketSender struct { ++ // hsAddr stores the address of the S2A handshaker service. ++ hsAddr string ++ // connectionID is the connection identifier that was created and sent by ++ // S2A at the end of a handshake. ++ connectionID uint64 ++ // localIdentity is the local identity that was used by S2A during session ++ // setup and included in the session result. ++ localIdentity *commonpb.Identity ++ // tokenManager manages access tokens for authenticating to S2A. ++ tokenManager tokenmanager.AccessTokenManager ++ // ensureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ ensureProcessSessionTickets *sync.WaitGroup ++} ++ ++// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker ++// service. This is done asynchronously and writes to the error logs if an error ++// occurs. ++func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { ++ // Note that the goroutine is in the function rather than at the caller ++ // because the fake ticket sender used for testing must run synchronously ++ // so that the session tickets can be accessed from it after the tests have ++ // been run. 
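++	// The WaitGroup is incremented here, before the goroutine is started, so
++	// that a caller waiting on it cannot observe a zero count while a send to
++	// S2A is still in flight; the matching Done runs in a deferred function
++	// inside the goroutine.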
++ if t.ensureProcessSessionTickets != nil { ++ t.ensureProcessSessionTickets.Add(1) ++ } ++ go func() { ++ if err := func() error { ++ defer func() { ++ if t.ensureProcessSessionTickets != nil { ++ t.ensureProcessSessionTickets.Done() ++ } ++ }() ++ hsConn, err := service.Dial(t.hsAddr) ++ if err != nil { ++ return err ++ } ++ client := s2apb.NewS2AServiceClient(hsConn) ++ ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) ++ defer cancel() ++ session, err := client.SetUpSession(ctx) ++ if err != nil { ++ return err ++ } ++ defer func() { ++ if err := session.CloseSend(); err != nil { ++ grpclog.Error(err) ++ } ++ }() ++ return t.writeTicketsToStream(session, sessionTickets) ++ }(); err != nil { ++ grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", ++ t.localIdentity, err) ++ } ++ callComplete <- true ++ close(callComplete) ++ }() ++} ++ ++// writeTicketsToStream writes the given session tickets to the given stream. ++func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { ++ if err := stream.Send( ++ &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ResumptionTicket{ ++ ResumptionTicket: &s2apb.ResumptionTicketReq{ ++ InBytes: sessionTickets, ++ ConnectionId: t.connectionID, ++ LocalIdentity: t.localIdentity, ++ }, ++ }, ++ AuthMechanisms: t.getAuthMechanisms(), ++ }, ++ ); err != nil { ++ return err ++ } ++ sessionResp, err := stream.Recv() ++ if err != nil { ++ return err ++ } ++ if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { ++ return fmt.Errorf("s2a session ticket response had error status: %v, %v", ++ sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) ++ } ++ return nil ++} ++ ++func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { ++ if t.tokenManager == nil { ++ return nil ++ } ++ // First handle the special case when no local identity has been provided ++ // by the application. In this case, an AuthenticationMechanism with no local ++ // identity will be sent. ++ if t.localIdentity == nil { ++ token, err := t.tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ ++ // Next, handle the case where the application (or the S2A) has specified ++ // a local identity. ++ token, err := t.tokenManager.Token(t.localIdentity) ++ if err != nil { ++ grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ Identity: t.localIdentity, ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +new file mode 100644 +index 00000000000..ec96ba3b6a6 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +@@ -0,0 +1,70 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package tokenmanager provides tokens for authenticating to S2A. ++package tokenmanager ++ ++import ( ++ "fmt" ++ "os" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++) ++ ++const ( ++ s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" ++) ++ ++// AccessTokenManager manages tokens for authenticating to S2A. ++type AccessTokenManager interface { ++ // DefaultToken returns a token that an application with no specified local ++ // identity must use to authenticate to S2A. ++ DefaultToken() (token string, err error) ++ // Token returns a token that an application with local identity equal to ++ // identity must use to authenticate to S2A. ++ Token(identity *commonpb.Identity) (token string, err error) ++} ++ ++type singleTokenAccessTokenManager struct { ++ token string ++} ++ ++// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance ++// that will always manage the same token. ++// ++// The token to be managed is read from the s2aAccessTokenEnvironmentVariable ++// environment variable. If this environment variable is not set, then this ++// function returns an error. ++func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { ++ token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) ++ if !variableExists { ++ return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) ++ } ++ return &singleTokenAccessTokenManager{token: token}, nil ++} ++ ++// DefaultToken always returns the token managed by the ++// singleTokenAccessTokenManager. ++func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { ++ return m.token, nil ++} ++ ++// Token always returns the token managed by the singleTokenAccessTokenManager. ++func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { ++ return m.token, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md +new file mode 100644 +index 00000000000..3806d1e9ccc +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/README.md +@@ -0,0 +1 @@ ++**This directory has the implementation of the S2Av2's gRPC-Go client libraries** +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +new file mode 100644 +index 00000000000..cc811879b53 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +@@ -0,0 +1,122 @@ ++/* ++ * ++ * Copyright 2022 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package certverifier offloads verifications to S2Av2. ++package certverifier ++ ++import ( ++ "crypto/x509" ++ "fmt" ++ ++ "github.com/google/s2a-go/stream" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++ ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and ++// receives a SessionResp. ++func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ // Offload verification to S2Av2. ++ if grpclog.V(1) { ++ grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") ++ } ++ if err := s2AStream.Send(&s2av2pb.SessionReq{ ++ ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ ++ ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ ++ Mode: verificationMode, ++ PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ ++ ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ ++ CertificateChain: rawCerts, ++ }, ++ }, ++ }, ++ }, ++ }); err != nil { ++ grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") ++ return err ++ } ++ ++ // Get the response from S2Av2. ++ resp, err := s2AStream.Recv() ++ if err != nil { ++ grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") ++ return err ++ } ++ ++ // Parse the response. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ ++ } ++ ++ if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { ++ return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) ++ } ++ ++ return nil ++ } ++} ++ ++// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and ++// receives a SessionResp. ++func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ // Offload verification to S2Av2. ++ if grpclog.V(1) { ++ grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") ++ } ++ if err := s2AStream.Send(&s2av2pb.SessionReq{ ++ ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ ++ ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ ++ Mode: verificationMode, ++ PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ ++ ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ ++ CertificateChain: rawCerts, ++ ServerHostname: hostname, ++ SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, ++ }, ++ }, ++ }, ++ }, ++ }); err != nil { ++ grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") ++ return err ++ } ++ ++ // Get the response from S2Av2. 
++ resp, err := s2AStream.Recv() ++ if err != nil { ++ grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") ++ return err ++ } ++ ++ // Parse the response. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ } ++ ++ if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { ++ return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) ++ } ++ ++ return nil ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..958f3cfaddf3645fa6c0578b5b6955d65ac4c172 +GIT binary patch +literal 998 +zcmXqLVt!=M#B^!_GZP~dlZfxTkgMw#Mx5CteAhsdS=K*t&xAY!UN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Gec7&Lt`^z^C)p%6J+ina%mHz5^^vyvNA9?G4eAQ +zG%<29H8CnfQWlH+MyjTjz +z2Bjk+Ub9TzT(Z$!`+w1f9a{SYn?h^$U$mDJKK^KOurlxGlhf8coXvjW^}bgt-rFjD +znHJ|Xo9Wew;}%JlW_Kjsthc+chb^XXpU3LOdy_KCS4OTntgUFd?YsGkV{8gPCrIUU +z7xZ1<|6Oe5g44&3J$ODp>G5YKW=00a#V!U;2J*mEAgjzGVIbBZvce&_^W7!+r|T*d +zavIkkZ(n>dQ`LY6q(GR3)qt6i@xOr}h$qOxWx&zImXe>Fn2DZTf#J-^uui3sYst!# +z{ImS~I{#0uS#WDz%!i-3yIxOGkACKhN$DyM2L{LbrkL@xJCpY3crHP)J~LHEx;nsZlX$z1Na++S~*x&F8L +zSHsIX`Pf;-7fz4f$yxWfRV}c$Te2$X#22gkrUlwjtNp9{IiD}~zjo}z)1J=iJoj~z +zPFk$n)~eK8B%5+0@w1g$N8H(l+-H4!LOMmyS_+%b`$^TD`}g=z?!9=Gts-l=GE2@y +HTP6bly#{pd + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..d2817641bafb022339926786ab85b545f40ac665 +GIT binary patch +literal 1147 +zcmXqLVktLhVvb+H%*4pVB*OPMYW498DSV5WgVNVW|Jr8oCgimNFB_*;n@8JsUPeZ4 +zRtAH{c0+ChPB!LH7B*p~&|nycgNMo4(NM@h03^fC!x>zfmseSqn3HNKV890w6yxnyM_06xZQcZ?W@M4y!->Y40>Qzg1y0 +z&gNQpqF{|c>D+ZZ(rFIKEL_n{%!~|-iyLnkG+s4m+z3nvvdS!tD-9Z#Eo|&v(%3b4 +zVzNoZ_g%=<^$R1;>=M3fAjvH2AGv2jo&gWY$-*qG2F#3%{|y8|JV6#N1CA!Pl>FSp +z%sk}C2j(GQ()!XwH^ngbB~t_{rh@w&h5|n>_#Tt3odjy +zuPCdY_u)fF^#8=jc^^+Q{aqN5`$@Gm({I-q6VU_LtQOD6naL>j(`V|D&;QI6%^$Dc +z@$uH$=blqSKTV1FFaEpmVP=>@vvi*Y`l|mqO|ORNA0oIS8nOI +z?NiA8%5=ljL@D29-OKpueEYz|zv5rHb554&taa#n7142tpUKWU?R-O_r^0)6j>G3Y +aOB;Xxy<$H9NT-Uzt(mu6i*GEf3IzZ)c%0q< + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..d8c3710c85f9ff41ddfc709924c866350a727a4f +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZa+?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg +zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW +z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N +zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
+zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ +zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic +z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 +zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C +z&5N-8^H(H$?psgMz!}^;s& +z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ +zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) +zTR0VF*I2Sr +HHQW{e5x{wf + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..dae619c097512f20d09d2054c63fc0f715d7be24 +GIT binary patch +literal 998 +zcmXqLVt!=M#B^!_GZP~dlZZk6mxvepr#-!NVoqOL*`yn*LOZ7#@Un4gwRyCC=VfH% +zW@RvFY&YaK;ACSCWnmL$3Jr#FICz+x9SwyH1VA$EJeHTH8JZfJT11KSnjmurkxQExm5_sxk(GhDiIJbd +zpox)-sfm%1VO{t`EzVyWX$3+}`kJnrGQY>^=jnZyz4d%;|IQeuB_Gcfo1a{_>B5Fg +zXYbI=4K*7QZdjc*51-)~EjRzyir2SPA5Z@zcTLeXV*g^*2f0P(7p>c +z?c<>#uh>H(ryKBq6bQ4h8Za|5{x=W=@dR183^XGxLzc8JJvw;mpWjp+5Pu +z!*lq#5Sb^i@`a>klJNjzx%iu)2*?h3yy-gEL(;p^g~mZo#n!wzh9Qua5C+wfRd +ztarh_l9E{Oro7&WNVTl@3-~vy&EDU;)-f|r-8<&mQ>C=;``&D2+5V0(pW(g3o1}Sj +z^gY793Mi&+ntK1`1tm%ENt=U{rb%k1PtbRHEhYTylqR?8HqOeuvA1fBY>pc~NUFTR +KZE!sDzAgX``fm0B + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..ce7f8d31d6802c7e68c188af8797c3a063894857 +GIT binary patch +literal 1147 +zcmXqLVktLhVvb+H%*4pVB%&$)xidhT`$ARBi)~L`p2_`@+!*`b5acj4ER7|Ts-W~ +z`MCv&d6kBO2K*oqZXOPo%(Bel%=|nTUzhK>3dC_Z(le%bCn$9moio9jI@8{ +zd;5%c{XT!(ig$;5K+$bpwI6ChVmse+UV1r`!RY5*U#A%ppFJxS>PX#w$sz7*X_R~A +zg!|$g&z66XI$)aW-TrxDio@fp!U^sS#+*GdHMB|H?BTf!^g~J_=D$Y-Ot{~xpTg~G+{k6 +Zdr{DieczoQDL-a%Ib3(Tsv+bGCjf+Ap-TV& + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32 +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ +z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) +z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ +zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J +zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ +zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ +zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk +zCap+`USuL4V}qf=6kBV_EFAN +zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg +zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW +z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N +zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
+zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ +zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic +z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 +zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C +z&5N-8^H(H$?psgMz!}^;s& +z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ +zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) +zTR0VF*I2Sr +HHQW{e5x{wf + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +new file mode 100644 +index 00000000000..493a5a26481 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 ++a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 ++OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 ++RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK ++P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 ++HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu ++0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 ++EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 ++/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA ++QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ ++nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD ++X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco ++pKklVz0= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +new file mode 100644 +index 00000000000..55a7f10c742 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF ++l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj +++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G ++4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA ++xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh ++68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ ++/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL ++Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA ++VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 ++9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH ++MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt ++aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq 
++xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx ++2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv ++EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z ++aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq ++udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs ++VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm ++56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT ++GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V ++Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm ++HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q ++BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH ++qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh ++GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32 +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ +z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) +z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ +zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J +zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ +zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ +zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk +zCap+`USuL4V}qf=6kBV_EFAN +zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta 0 { ++ cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) ++ if cert.PrivateKey == nil { ++ return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") ++ } ++ } ++ ++ minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Create mTLS credentials for client. ++ config := &tls.Config{ ++ VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), ++ ServerName: serverHostname, ++ InsecureSkipVerify: true, // NOLINT ++ ClientSessionCache: nil, ++ SessionTicketsDisabled: true, ++ MinVersion: minVersion, ++ MaxVersion: maxVersion, ++ NextProtos: []string{h2}, ++ } ++ if len(tlsConfig.CertificateChain) > 0 { ++ config.Certificates = []tls.Certificate{cert} ++ } ++ return config, nil ++} ++ ++// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. ++func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { ++ return &tls.Config{ ++ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), ++ }, nil ++} ++ ++// ClientConfig builds a TLS config for a server to establish a secure ++// connection with a client, based on SNI communicated during ClientHello. 
++// Ensures that server presents the correct certificate to establish a TLS ++// connection. ++func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { ++ return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { ++ tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) ++ if err != nil { ++ return nil, err ++ } ++ ++ var cert tls.Certificate ++ for i, v := range tlsConfig.CertificateChain { ++ // Populate Certificates field. ++ block, _ := pem.Decode([]byte(v)) ++ if block == nil { ++ return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") ++ } ++ x509Cert, err := x509.ParseCertificate(block.Bytes) ++ if err != nil { ++ return nil, err ++ } ++ cert.Certificate = append(cert.Certificate, x509Cert.Raw) ++ if i == 0 { ++ cert.Leaf = x509Cert ++ } ++ } ++ ++ cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) ++ if cert.PrivateKey == nil { ++ return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") ++ } ++ ++ minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) ++ if err != nil { ++ return nil, err ++ } ++ ++ clientAuth := getTLSClientAuthType(tlsConfig) ++ ++ var cipherSuites []uint16 ++ cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) ++ ++ // Create mTLS credentials for server. ++ return &tls.Config{ ++ Certificates: []tls.Certificate{cert}, ++ VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), ++ ClientAuth: clientAuth, ++ CipherSuites: cipherSuites, ++ SessionTicketsDisabled: true, ++ MinVersion: minVersion, ++ MaxVersion: maxVersion, ++ NextProtos: []string{h2}, ++ }, nil ++ } ++} ++ ++func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { ++ var tlsGoCipherSuites []uint16 ++ for _, v := range tlsConfigCipherSuites { ++ s := getTLSCipherSuite(v) ++ if s != 0xffff { ++ tlsGoCipherSuites = append(tlsGoCipherSuites, s) ++ } ++ } ++ return tlsGoCipherSuites ++} ++ ++func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { ++ switch tlsCipherSuite { ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: ++ return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: ++ return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: ++ return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: ++ return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: ++ return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: ++ return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 ++ default: ++ return 0xffff ++ } ++} ++ ++func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { ++ authMechanisms := getAuthMechanisms(tokenManager, localIdentities) ++ var locID *commonpbv1.Identity ++ if localIdentities != nil { ++ locID = localIdentities[0] ++ } ++ ++ if err := 
s2AStream.Send(&s2av2pb.SessionReq{ ++ LocalIdentity: locID, ++ AuthenticationMechanisms: authMechanisms, ++ ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ ++ GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ ++ ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, ++ Sni: sni, ++ }, ++ }, ++ }); err != nil { ++ return nil, err ++ } ++ ++ resp, err := s2AStream.Recv() ++ if err != nil { ++ return nil, err ++ } ++ ++ // TODO(rmehta19): Add unit test for this if statement. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ } ++ ++ return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil ++} ++ ++func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { ++ var clientAuth tls.ClientAuthType ++ switch x := tlsConfig.RequestClientCertificate; x { ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: ++ clientAuth = tls.NoClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: ++ clientAuth = tls.RequestClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: ++ // This case actually maps to tls.VerifyClientCertIfGiven. However this ++ // mapping triggers normal verification, followed by custom verification, ++ // specified in VerifyPeerCertificate. To bypass normal verification, and ++ // only do custom verification we set clientAuth to RequireAnyClientCert or ++ // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full ++ // discussion. ++ clientAuth = tls.RequireAnyClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: ++ clientAuth = tls.RequireAnyClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: ++ // This case actually maps to tls.RequireAndVerifyClientCert. However this ++ // mapping triggers normal verification, followed by custom verification, ++ // specified in VerifyPeerCertificate. To bypass normal verification, and ++ // only do custom verification we set clientAuth to RequireAnyClientCert or ++ // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full ++ // discussion. 
++ clientAuth = tls.RequireAnyClientCert ++ default: ++ clientAuth = tls.RequireAnyClientCert ++ } ++ return clientAuth ++} ++ ++func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { ++ if tokenManager == nil { ++ return nil ++ } ++ if len(localIdentities) == 0 { ++ token, err := tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("Unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2av2pb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ var authMechanisms []*s2av2pb.AuthenticationMechanism ++ for _, localIdentity := range localIdentities { ++ if localIdentity == nil { ++ token, err := tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }) ++ } else { ++ token, err := tokenManager.Token(localIdentity) ++ if err != nil { ++ grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }) ++ } ++ } ++ return authMechanisms ++} ++ ++// TODO(rmehta19): refactor switch statements into a helper function. ++func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { ++ // Map S2Av2 TLSVersion to consts defined in tls package. ++ var minVersion uint16 ++ var maxVersion uint16 ++ switch x := tlsConfig.MinTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ minVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ minVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ minVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ minVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) ++ } ++ ++ switch x := tlsConfig.MaxTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ maxVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ maxVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ maxVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ maxVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) ++ } ++ if minVersion > maxVersion { ++ return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") ++ } ++ return minVersion, maxVersion, nil ++} ++ ++func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { ++ // Map S2Av2 TLSVersion to consts defined in tls package. 
++ var minVersion uint16 ++ var maxVersion uint16 ++ switch x := tlsConfig.MinTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ minVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ minVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ minVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ minVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) ++ } ++ ++ switch x := tlsConfig.MaxTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ maxVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ maxVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ maxVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ maxVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) ++ } ++ if minVersion > maxVersion { ++ return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") ++ } ++ return minVersion, maxVersion, nil ++} +diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go +new file mode 100644 +index 00000000000..1c1349de4af +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a.go +@@ -0,0 +1,412 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package s2a provides the S2A transport credentials used by a gRPC ++// application. ++package s2a ++ ++import ( ++ "context" ++ "crypto/tls" ++ "errors" ++ "fmt" ++ "net" ++ "sync" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "github.com/google/s2a-go/fallback" ++ "github.com/google/s2a-go/internal/handshaker" ++ "github.com/google/s2a-go/internal/handshaker/service" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "github.com/google/s2a-go/internal/v2" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++const ( ++ s2aSecurityProtocol = "tls" ++ // defaultTimeout specifies the default server handshake timeout. ++ defaultTimeout = 30.0 * time.Second ++) ++ ++// s2aTransportCreds are the transport credentials required for establishing ++// a secure connection using the S2A. They implement the ++// credentials.TransportCredentials interface. ++type s2aTransportCreds struct { ++ info *credentials.ProtocolInfo ++ minTLSVersion commonpb.TLSVersion ++ maxTLSVersion commonpb.TLSVersion ++ // tlsCiphersuites contains the ciphersuites used in the S2A connection. ++ // Note that these are currently unconfigurable. ++ tlsCiphersuites []commonpb.Ciphersuite ++ // localIdentity should only be used by the client. ++ localIdentity *commonpb.Identity ++ // localIdentities should only be used by the server. 
++ localIdentities []*commonpb.Identity ++ // targetIdentities should only be used by the client. ++ targetIdentities []*commonpb.Identity ++ isClient bool ++ s2aAddr string ++ ensureProcessSessionTickets *sync.WaitGroup ++} ++ ++// NewClientCreds returns a client-side transport credentials object that uses ++// the S2A to establish a secure connection with a server. ++func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { ++ if opts == nil { ++ return nil, errors.New("nil client options") ++ } ++ var targetIdentities []*commonpb.Identity ++ for _, targetIdentity := range opts.TargetIdentities { ++ protoTargetIdentity, err := toProtoIdentity(targetIdentity) ++ if err != nil { ++ return nil, err ++ } ++ targetIdentities = append(targetIdentities, protoTargetIdentity) ++ } ++ localIdentity, err := toProtoIdentity(opts.LocalIdentity) ++ if err != nil { ++ return nil, err ++ } ++ if opts.EnableLegacyMode { ++ return &s2aTransportCreds{ ++ info: &credentials.ProtocolInfo{ ++ SecurityProtocol: s2aSecurityProtocol, ++ }, ++ minTLSVersion: commonpb.TLSVersion_TLS1_3, ++ maxTLSVersion: commonpb.TLSVersion_TLS1_3, ++ tlsCiphersuites: []commonpb.Ciphersuite{ ++ commonpb.Ciphersuite_AES_128_GCM_SHA256, ++ commonpb.Ciphersuite_AES_256_GCM_SHA384, ++ commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, ++ }, ++ localIdentity: localIdentity, ++ targetIdentities: targetIdentities, ++ isClient: true, ++ s2aAddr: opts.S2AAddress, ++ ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, ++ }, nil ++ } ++ verificationMode := getVerificationMode(opts.VerificationMode) ++ var fallbackFunc fallback.ClientHandshake ++ if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { ++ fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc ++ } ++ return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) ++} ++ ++// NewServerCreds returns a server-side transport credentials object that uses ++// the S2A to establish a secure connection with a client. ++func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { ++ if opts == nil { ++ return nil, errors.New("nil server options") ++ } ++ var localIdentities []*commonpb.Identity ++ for _, localIdentity := range opts.LocalIdentities { ++ protoLocalIdentity, err := toProtoIdentity(localIdentity) ++ if err != nil { ++ return nil, err ++ } ++ localIdentities = append(localIdentities, protoLocalIdentity) ++ } ++ if opts.EnableLegacyMode { ++ return &s2aTransportCreds{ ++ info: &credentials.ProtocolInfo{ ++ SecurityProtocol: s2aSecurityProtocol, ++ }, ++ minTLSVersion: commonpb.TLSVersion_TLS1_3, ++ maxTLSVersion: commonpb.TLSVersion_TLS1_3, ++ tlsCiphersuites: []commonpb.Ciphersuite{ ++ commonpb.Ciphersuite_AES_128_GCM_SHA256, ++ commonpb.Ciphersuite_AES_256_GCM_SHA384, ++ commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, ++ }, ++ localIdentities: localIdentities, ++ isClient: false, ++ s2aAddr: opts.S2AAddress, ++ }, nil ++ } ++ verificationMode := getVerificationMode(opts.VerificationMode) ++ return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) ++} ++ ++// ClientHandshake initiates a client-side TLS handshake using the S2A. 
++func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { ++ if !c.isClient { ++ return nil, nil, errors.New("client handshake called using server transport credentials") ++ } ++ ++ // Connect to the S2A. ++ hsConn, err := service.Dial(c.s2aAddr) ++ if err != nil { ++ grpclog.Infof("Failed to connect to S2A: %v", err) ++ return nil, nil, err ++ } ++ ++ var cancel context.CancelFunc ++ ctx, cancel = context.WithCancel(ctx) ++ defer cancel() ++ ++ opts := &handshaker.ClientHandshakerOptions{ ++ MinTLSVersion: c.minTLSVersion, ++ MaxTLSVersion: c.maxTLSVersion, ++ TLSCiphersuites: c.tlsCiphersuites, ++ TargetIdentities: c.targetIdentities, ++ LocalIdentity: c.localIdentity, ++ TargetName: serverAuthority, ++ EnsureProcessSessionTickets: c.ensureProcessSessionTickets, ++ } ++ chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) ++ if err != nil { ++ grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) ++ return nil, nil, err ++ } ++ defer func() { ++ if err != nil { ++ if closeErr := chs.Close(); closeErr != nil { ++ grpclog.Infof("Close failed unexpectedly: %v", err) ++ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) ++ } ++ } ++ }() ++ ++ secConn, authInfo, err := chs.ClientHandshake(context.Background()) ++ if err != nil { ++ grpclog.Infof("Handshake failed: %v", err) ++ return nil, nil, err ++ } ++ return secConn, authInfo, nil ++} ++ ++// ServerHandshake initiates a server-side TLS handshake using the S2A. ++func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { ++ if c.isClient { ++ return nil, nil, errors.New("server handshake called using client transport credentials") ++ } ++ ++ // Connect to the S2A. 
++ hsConn, err := service.Dial(c.s2aAddr) ++ if err != nil { ++ grpclog.Infof("Failed to connect to S2A: %v", err) ++ return nil, nil, err ++ } ++ ++ ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) ++ defer cancel() ++ ++ opts := &handshaker.ServerHandshakerOptions{ ++ MinTLSVersion: c.minTLSVersion, ++ MaxTLSVersion: c.maxTLSVersion, ++ TLSCiphersuites: c.tlsCiphersuites, ++ LocalIdentities: c.localIdentities, ++ } ++ shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) ++ if err != nil { ++ grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) ++ return nil, nil, err ++ } ++ defer func() { ++ if err != nil { ++ if closeErr := shs.Close(); closeErr != nil { ++ grpclog.Infof("Close failed unexpectedly: %v", err) ++ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) ++ } ++ } ++ }() ++ ++ secConn, authInfo, err := shs.ServerHandshake(context.Background()) ++ if err != nil { ++ grpclog.Infof("Handshake failed: %v", err) ++ return nil, nil, err ++ } ++ return secConn, authInfo, nil ++} ++ ++func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { ++ return *c.info ++} ++ ++func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { ++ info := *c.info ++ var localIdentity *commonpb.Identity ++ if c.localIdentity != nil { ++ localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) ++ } ++ var localIdentities []*commonpb.Identity ++ if c.localIdentities != nil { ++ localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) ++ for i, localIdentity := range c.localIdentities { ++ localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) ++ } ++ } ++ var targetIdentities []*commonpb.Identity ++ if c.targetIdentities != nil { ++ targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) ++ for i, targetIdentity := range c.targetIdentities { ++ targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) ++ } ++ } ++ return &s2aTransportCreds{ ++ info: &info, ++ minTLSVersion: c.minTLSVersion, ++ maxTLSVersion: c.maxTLSVersion, ++ tlsCiphersuites: c.tlsCiphersuites, ++ localIdentity: localIdentity, ++ localIdentities: localIdentities, ++ targetIdentities: targetIdentities, ++ isClient: c.isClient, ++ s2aAddr: c.s2aAddr, ++ } ++} ++ ++func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { ++ c.info.ServerName = serverNameOverride ++ return nil ++} ++ ++// TLSClientConfigOptions specifies parameters for creating client TLS config. ++type TLSClientConfigOptions struct { ++ // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. ++ // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ ++ // ServerName: "example.com", ++ // }) ++ ServerName string ++} ++ ++// TLSClientConfigFactory defines the interface for a client TLS config factory. ++type TLSClientConfigFactory interface { ++ Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) ++} ++ ++// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. 
++func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { ++ if opts == nil { ++ return nil, fmt.Errorf("opts must be non-nil") ++ } ++ if opts.EnableLegacyMode { ++ return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ // The only possible error is: access token not set in the environment, ++ // which is okay in environments other than serverless. ++ grpclog.Infof("Access token manager not initialized: %v", err) ++ return &s2aTLSClientConfigFactory{ ++ s2av2Address: opts.S2AAddress, ++ tokenManager: nil, ++ verificationMode: getVerificationMode(opts.VerificationMode), ++ serverAuthorizationPolicy: opts.serverAuthorizationPolicy, ++ }, nil ++ } ++ return &s2aTLSClientConfigFactory{ ++ s2av2Address: opts.S2AAddress, ++ tokenManager: tokenManager, ++ verificationMode: getVerificationMode(opts.VerificationMode), ++ serverAuthorizationPolicy: opts.serverAuthorizationPolicy, ++ }, nil ++} ++ ++type s2aTLSClientConfigFactory struct { ++ s2av2Address string ++ tokenManager tokenmanager.AccessTokenManager ++ verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode ++ serverAuthorizationPolicy []byte ++} ++ ++func (f *s2aTLSClientConfigFactory) Build( ++ ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { ++ serverName := "" ++ if opts != nil && opts.ServerName != "" { ++ serverName = opts.ServerName ++ } ++ return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) ++} ++ ++func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { ++ switch verificationMode { ++ case ConnectToGoogle: ++ return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ++ case Spiffe: ++ return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE ++ default: ++ return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED ++ } ++} ++ ++// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
++// Example use with http.RoundTripper: ++// ++// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, // required ++// }) ++// transport := http.DefaultTransport ++// transport.DialTLSContext = dialTLSContext ++func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { ++ ++ return func(ctx context.Context, network, addr string) (net.Conn, error) { ++ ++ fallback := func(err error) (net.Conn, error) { ++ if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && ++ opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { ++ fbDialer := opts.FallbackOpts.FallbackDialer ++ grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) ++ fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) ++ if fbErr != nil { ++ return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) ++ } ++ return fbConn, nil ++ } ++ return nil, err ++ } ++ ++ factory, err := NewTLSClientConfigFactory(opts) ++ if err != nil { ++ grpclog.Infof("error creating S2A client config factory: %v", err) ++ return fallback(err) ++ } ++ ++ serverName, _, err := net.SplitHostPort(addr) ++ if err != nil { ++ serverName = addr ++ } ++ timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) ++ defer cancel() ++ s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ ++ ServerName: serverName, ++ }) ++ if err != nil { ++ grpclog.Infof("error building S2A TLS config: %v", err) ++ return fallback(err) ++ } ++ ++ s2aDialer := &tls.Dialer{ ++ Config: s2aTLSConfig, ++ } ++ c, err := s2aDialer.DialContext(ctx, network, addr) ++ if err != nil { ++ grpclog.Infof("error dialing with S2A to %s: %v", addr, err) ++ return fallback(err) ++ } ++ grpclog.Infof("success dialing MTLS to %s with S2A", addr) ++ return c, nil ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go +new file mode 100644 +index 00000000000..94feafb9cf8 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a_options.go +@@ -0,0 +1,208 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package s2a ++ ++import ( ++ "context" ++ "crypto/tls" ++ "errors" ++ "sync" ++ ++ "github.com/google/s2a-go/fallback" ++ "github.com/google/s2a-go/stream" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++) ++ ++// Identity is the interface for S2A identities. ++type Identity interface { ++ // Name returns the name of the identity. ++ Name() string ++} ++ ++type spiffeID struct { ++ spiffeID string ++} ++ ++func (s *spiffeID) Name() string { return s.spiffeID } ++ ++// NewSpiffeID creates a SPIFFE ID from id. 
++func NewSpiffeID(id string) Identity { ++ return &spiffeID{spiffeID: id} ++} ++ ++type hostname struct { ++ hostname string ++} ++ ++func (h *hostname) Name() string { return h.hostname } ++ ++// NewHostname creates a hostname from name. ++func NewHostname(name string) Identity { ++ return &hostname{hostname: name} ++} ++ ++type uid struct { ++ uid string ++} ++ ++func (h *uid) Name() string { return h.uid } ++ ++// NewUID creates a UID from name. ++func NewUID(name string) Identity { ++ return &uid{uid: name} ++} ++ ++// VerificationModeType specifies the mode that S2A must use to verify the peer ++// certificate chain. ++type VerificationModeType int ++ ++// Three types of verification modes. ++const ( ++ Unspecified = iota ++ ConnectToGoogle ++ Spiffe ++) ++ ++// ClientOptions contains the client-side options used to establish a secure ++// channel using the S2A handshaker service. ++type ClientOptions struct { ++ // TargetIdentities contains a list of allowed server identities. One of the ++ // target identities should match the peer identity in the handshake ++ // result; otherwise, the handshake fails. ++ TargetIdentities []Identity ++ // LocalIdentity is the local identity of the client application. If none is ++ // provided, then the S2A will choose the default identity, if one exists. ++ LocalIdentity Identity ++ // S2AAddress is the address of the S2A. ++ S2AAddress string ++ // EnsureProcessSessionTickets waits for all session tickets to be sent to ++ // S2A before a process completes. ++ // ++ // This functionality is crucial for processes that complete very soon after ++ // using S2A to establish a TLS connection, but it can be ignored for longer ++ // lived processes. ++ // ++ // Usage example: ++ // func main() { ++ // var ensureProcessSessionTickets sync.WaitGroup ++ // clientOpts := &s2a.ClientOptions{ ++ // EnsureProcessSessionTickets: &ensureProcessSessionTickets, ++ // // Set other members. ++ // } ++ // creds, _ := s2a.NewClientCreds(clientOpts) ++ // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) ++ // defer conn.Close() ++ // ++ // // Make RPC call. ++ // ++ // // The process terminates right after the RPC call ends. ++ // // ensureProcessSessionTickets can be used to ensure resumption ++ // // tickets are fully processed. If the process is long-lived, using ++ // // ensureProcessSessionTickets is not necessary. ++ // ensureProcessSessionTickets.Wait() ++ // } ++ EnsureProcessSessionTickets *sync.WaitGroup ++ // If true, enables the use of legacy S2Av1. ++ EnableLegacyMode bool ++ // VerificationMode specifies the mode that S2A must use to verify the ++ // peer certificate chain. ++ VerificationMode VerificationModeType ++ ++ // Optional fallback after dialing with S2A fails. ++ FallbackOpts *FallbackOptions ++ ++ // Generates an S2AStream interface for talking to the S2A server. ++ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ++ ++ // Serialized user specified policy for server authorization. ++ serverAuthorizationPolicy []byte ++} ++ ++// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. ++type FallbackOptions struct { ++ // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). ++ // It will be called by ClientHandshake function, after handshake with S2A fails. ++ // s2a.NewClientCreds() ignores the other FallbackDialer field. 
++ FallbackClientHandshakeFunc fallback.ClientHandshake ++ ++ // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). ++ // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. ++ // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. ++ FallbackDialer *FallbackDialer ++} ++ ++// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. ++type FallbackDialer struct { ++ // Dialer specifies a fallback tls.Dialer. ++ Dialer *tls.Dialer ++ // ServerAddr is used by Dialer to establish fallback connection. ++ ServerAddr string ++} ++ ++// DefaultClientOptions returns the default client options. ++func DefaultClientOptions(s2aAddress string) *ClientOptions { ++ return &ClientOptions{ ++ S2AAddress: s2aAddress, ++ VerificationMode: ConnectToGoogle, ++ } ++} ++ ++// ServerOptions contains the server-side options used to establish a secure ++// channel using the S2A handshaker service. ++type ServerOptions struct { ++ // LocalIdentities is the list of local identities that may be assumed by ++ // the server. If no local identity is specified, then the S2A chooses a ++ // default local identity, if one exists. ++ LocalIdentities []Identity ++ // S2AAddress is the address of the S2A. ++ S2AAddress string ++ // If true, enables the use of legacy S2Av1. ++ EnableLegacyMode bool ++ // VerificationMode specifies the mode that S2A must use to verify the ++ // peer certificate chain. ++ VerificationMode VerificationModeType ++ ++ // Generates an S2AStream interface for talking to the S2A server. ++ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ++} ++ ++// DefaultServerOptions returns the default server options. ++func DefaultServerOptions(s2aAddress string) *ServerOptions { ++ return &ServerOptions{ ++ S2AAddress: s2aAddress, ++ VerificationMode: ConnectToGoogle, ++ } ++} ++ ++func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { ++ if identity == nil { ++ return nil, nil ++ } ++ switch id := identity.(type) { ++ case *spiffeID: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil ++ case *hostname: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil ++ case *uid: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil ++ default: ++ return nil, errors.New("unrecognized identity type") ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go +new file mode 100644 +index 00000000000..d649cc46148 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a_utils.go +@@ -0,0 +1,79 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ * ++ */ ++ ++package s2a ++ ++import ( ++ "context" ++ "errors" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/peer" ++) ++ ++// AuthInfo exposes security information from the S2A to the application. ++type AuthInfo interface { ++ // AuthType returns the authentication type. ++ AuthType() string ++ // ApplicationProtocol returns the application protocol, e.g. "grpc". ++ ApplicationProtocol() string ++ // TLSVersion returns the TLS version negotiated during the handshake. ++ TLSVersion() commonpb.TLSVersion ++ // Ciphersuite returns the ciphersuite negotiated during the handshake. ++ Ciphersuite() commonpb.Ciphersuite ++ // PeerIdentity returns the authenticated identity of the peer. ++ PeerIdentity() *commonpb.Identity ++ // LocalIdentity returns the local identity of the application used during ++ // session setup. ++ LocalIdentity() *commonpb.Identity ++ // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in ++ // the S2A handshake. ++ PeerCertFingerprint() []byte ++ // LocalCertFingerprint returns the SHA256 hash of the local certificate used ++ // in the S2A handshake. ++ LocalCertFingerprint() []byte ++ // IsHandshakeResumed returns true if a cached session was used to resume ++ // the handshake. ++ IsHandshakeResumed() bool ++ // SecurityLevel returns the security level of the connection. ++ SecurityLevel() credentials.SecurityLevel ++} ++ ++// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given ++// peer, if it exists. This API should be used by gRPC clients after ++// obtaining a peer object using the grpc.Peer() CallOption. ++func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { ++ s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) ++ if !ok { ++ return nil, errors.New("no S2AAuthInfo found in Peer") ++ } ++ return s2aAuthInfo, nil ++} ++ ++// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given ++// context, if it exists. This API should be used by gRPC server RPC handlers ++// to get information about the peer. On the client-side, use the grpc.Peer() ++// CallOption and the AuthInfoFromPeer function. ++func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { ++ p, ok := peer.FromContext(ctx) ++ if !ok { ++ return nil, errors.New("no Peer found in Context") ++ } ++ return AuthInfoFromPeer(p) ++} +diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go +new file mode 100644 +index 00000000000..584bf32b1c7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go +@@ -0,0 +1,34 @@ ++/* ++ * ++ * Copyright 2023 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package stream provides an interface for bidirectional streaming to the S2A server. 
++package stream ++ ++import ( ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. ++type S2AStream interface { ++ // Send sends the message to the S2A server. ++ Send(*s2av2pb.SessionReq) error ++ // Recv receives the message from the S2A server. ++ Recv() (*s2av2pb.SessionResp, error) ++ // Closes the channel to the S2A server. ++ CloseSend() error ++} +diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem +new file mode 100644 +index 00000000000..493a5a26481 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/client_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 ++a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 ++OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 ++RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK ++P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 ++HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu ++0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 ++EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 ++/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA ++QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ ++nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD ++X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco ++pKklVz0= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem +new file mode 100644 +index 00000000000..55a7f10c742 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/client_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF ++l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj +++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G ++4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA ++xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh ++68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ ++/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL ++Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA ++VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 ++9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH ++MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt ++aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq ++xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx ++2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv 
++EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z ++aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq ++udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs ++VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm ++56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT ++GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V ++Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm ++HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q ++BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH ++qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh ++GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem +new file mode 100644 +index 00000000000..0f98322c724 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/server_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT ++fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ ++qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE ++xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es ++Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 ++Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM ++ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR ++e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X ++POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl ++AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg ++odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ ++PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN ++Dhm6uZM= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem +new file mode 100644 +index 00000000000..81afea783df +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/server_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs ++8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO ++QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk ++XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA ++Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc ++gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf ++LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl ++jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 ++4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q ++Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P 
++nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 ++drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE ++duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 ++L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG ++06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm ++eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD ++uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 ++lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL ++a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb ++hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ ++7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j ++r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 ++eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD ++B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz ++7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml +deleted file mode 100644 +index d8156a60ba9..00000000000 +--- a/vendor/github.com/google/uuid/.travis.yml ++++ /dev/null +@@ -1,9 +0,0 @@ +-language: go +- +-go: +- - 1.4.3 +- - 1.5.3 +- - tip +- +-script: +- - go test -v ./... +diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md +new file mode 100644 +index 00000000000..2bd78667afb +--- /dev/null ++++ b/vendor/github.com/google/uuid/CHANGELOG.md +@@ -0,0 +1,10 @@ ++# Changelog ++ ++## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) ++ ++ ++### Bug Fixes ++ ++* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) ++ ++## Changelog +diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md +index 04fdf09f136..5566888726d 100644 +--- a/vendor/github.com/google/uuid/CONTRIBUTING.md ++++ b/vendor/github.com/google/uuid/CONTRIBUTING.md +@@ -2,6 +2,22 @@ + + We definitely welcome patches and contribution to this project! + ++### Tips ++ ++Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). ++ ++Always try to include a test case! If it is not possible or not necessary, ++please explain why in the pull request description. ++ ++### Releasing ++ ++Commits that would precipitate a SemVer change, as desrcibed in the Conventional ++Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) ++to create a release candidate pull request. Once submitted, `release-please` ++will create a release. ++ ++For tips on how to work with `release-please`, see its documentation. 
++ + ### Legal requirements + + In order to protect both you and ourselves, you will need to sign the +diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md +index f765a46f915..3e9a61889de 100644 +--- a/vendor/github.com/google/uuid/README.md ++++ b/vendor/github.com/google/uuid/README.md +@@ -1,6 +1,6 @@ +-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) ++# uuid + The uuid package generates and inspects UUIDs based on +-[RFC 4122](http://tools.ietf.org/html/rfc4122) ++[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) + and DCE 1.1: Authentication and Security Services. + + This package is based on the github.com/pborman/uuid package (previously named +@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this + change is the ability to represent an invalid UUID (vs a NIL UUID). + + ###### Install +-`go get github.com/google/uuid` ++```sh ++go get github.com/google/uuid ++``` + + ###### Documentation +-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) ++[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + + Full `go doc` style documentation for the package can be viewed online without + installing this package by using the GoDoc site here: +diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go +index 24b78edc907..b2a0bc8711b 100644 +--- a/vendor/github.com/google/uuid/node_js.go ++++ b/vendor/github.com/google/uuid/node_js.go +@@ -7,6 +7,6 @@ + package uuid + + // getHardwareInterface returns nil values for the JS version of the code. +-// This remvoves the "net" dependency, because it is not used in the browser. ++// This removes the "net" dependency, because it is not used in the browser. + // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
+ func getHardwareInterface(name string) (string, []byte) { return "", nil } +diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go +index a57207aeb6f..a56138cc4bd 100644 +--- a/vendor/github.com/google/uuid/uuid.go ++++ b/vendor/github.com/google/uuid/uuid.go +@@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: +- if strings.ToLower(s[:9]) != "urn:uuid:" { ++ if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] +@@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { + 9, 11, + 14, 16, + 19, 21, +- 24, 26, 28, 30, 32, 34} { ++ 24, 26, 28, 30, 32, 34, ++ } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") +@@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { ++ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] +@@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { + 9, 11, + 14, 16, + 19, 21, +- 24, 26, 28, 30, 32, 34} { ++ 24, 26, 28, 30, 32, 34, ++ } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") +diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +index 10295639c5a..91d60a809fa 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json ++++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +@@ -1,3 +1,3 @@ + { +- "v2": "2.7.1" ++ "v2": "2.11.0" + } +diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +index 41a7ca94d4d..e17b196f6c7 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md ++++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +@@ -1,5 +1,50 @@ + # Changelog + ++## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) ++ ++ ++### Features ++ ++* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) ++ ++ ++### Bug Fixes ++ ++* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) ++ ++## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) ++ ++ ++### Features ++ ++* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) ++ ++## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) ++ ++ ++### Bug Fixes ++ ++* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) ++ ++## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) ++ ++ ++### Features ++ ++* **apierror:** add method to return HTTP status code 
conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) ++ ++ ++### Documentation ++ ++* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) ++ ++## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) ++ ++ ++### Features ++ ++* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) ++ + ## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +index ed862c8b398..d785a065cab 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go ++++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +@@ -29,6 +29,10 @@ + + // Package apierror implements a wrapper error for parsing error details from + // API calls. Both HTTP & gRPC status errors are supported. ++// ++// For examples of how to use [APIError] with client libraries please reference ++// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) ++// in the client library documentation. + package apierror + + import ( +@@ -345,3 +349,13 @@ func parseHTTPDetails(gae *googleapi.Error) ErrDetails { + + return parseDetails(details) + } ++ ++// HTTPCode returns the underlying HTTP response status code. This method returns ++// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To ++// check gRPC error codes use [google.golang.org/grpc/status.Code]. ++func (a *APIError) HTTPCode() int { ++ if a.httpErr == nil { ++ return -1 ++ } ++ return a.httpErr.Code ++} +diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go +index e092005563b..c52e03f6436 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/call_option.go ++++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go +@@ -218,6 +218,14 @@ func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p + } + ++type timeoutOpt struct { ++ t time.Duration ++} ++ ++func (t timeoutOpt) Resolve(s *CallSettings) { ++ s.timeout = t.t ++} ++ + // WithPath applies a Path override to the HTTP-based APICall. + // + // This is for internal use only. +@@ -230,6 +238,15 @@ func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) + } + ++// WithTimeout is a convenience option for setting a context.WithTimeout on the ++// singular context.Context used for **all** APICall attempts. Calculated from ++// the start of the first APICall attempt. ++// If the context.Context provided to Invoke already has a Deadline set, that ++// will always be respected over the deadline calculated using this option. ++func WithTimeout(t time.Duration) CallOption { ++ return &timeoutOpt{t: t} ++} ++ + // CallSettings allow fine-grained control over how calls are made. + type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. 
+@@ -241,4 +258,8 @@ type CallSettings struct { + + // Path is an HTTP override for an APICall. + Path string ++ ++ // Timeout defines the amount of time that Invoke has to complete. ++ // Unexported so it cannot be changed by the code in an APICall. ++ timeout time.Duration + } +diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go +index 139371a0bf1..6488461f4dc 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/header.go ++++ b/vendor/github.com/googleapis/gax-go/v2/header.go +@@ -29,7 +29,73 @@ + + package gax + +-import "bytes" ++import ( ++ "bytes" ++ "runtime" ++ "strings" ++ "unicode" ++) ++ ++var ( ++ // GoVersion is a header-safe representation of the current runtime ++ // environment's Go version. This is for GAX consumers that need to ++ // report the Go runtime version in API calls. ++ GoVersion string ++ // version is a package internal global variable for testing purposes. ++ version = runtime.Version ++) ++ ++// versionUnknown is only used when the runtime version cannot be determined. ++const versionUnknown = "UNKNOWN" ++ ++func init() { ++ GoVersion = goVersion() ++} ++ ++// goVersion returns a Go runtime version derived from the runtime environment ++// that is modified to be suitable for reporting in a header, meaning it has no ++// whitespace. If it is unable to determine the Go runtime version, it returns ++// versionUnknown. ++func goVersion() string { ++ const develPrefix = "devel +" ++ ++ s := version() ++ if strings.HasPrefix(s, develPrefix) { ++ s = s[len(develPrefix):] ++ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { ++ s = s[:p] ++ } ++ return s ++ } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { ++ s = s[:p] ++ } ++ ++ notSemverRune := func(r rune) bool { ++ return !strings.ContainsRune("0123456789.", r) ++ } ++ ++ if strings.HasPrefix(s, "go1") { ++ s = s[2:] ++ var prerelease string ++ if p := strings.IndexFunc(s, notSemverRune); p >= 0 { ++ s, prerelease = s[:p], s[p:] ++ } ++ if strings.HasSuffix(s, ".") { ++ s += "0" ++ } else if strings.Count(s, ".") < 2 { ++ s += ".0" ++ } ++ if prerelease != "" { ++ // Some release candidates already have a dash in them. ++ if !strings.HasPrefix(prerelease, "-") { ++ prerelease = "-" + prerelease ++ } ++ s += prerelease ++ } ++ return s ++ } ++ return "UNKNOWN" ++} + + // XGoogHeader is for use by the Google Cloud Libraries only. + // +diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go +index 936873ec4f8..374dcdb1151 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go ++++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go +@@ -30,4 +30,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "2.7.1" ++const Version = "2.11.0" +diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go +index 9fcc29959b9..721d1af5517 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/invoke.go ++++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go +@@ -68,6 +68,16 @@ type sleeper func(ctx context.Context, d time.Duration) error + // invoke implements Invoke, taking an additional sleeper argument for testing. + func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer ++ ++ // Only use the value provided via WithTimeout if the context doesn't ++ // already have a deadline. 
This is important for backwards compatibility if ++ // the user already set a deadline on the context given to Invoke. ++ if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { ++ c, cc := context.WithTimeout(ctx, settings.timeout) ++ defer cc() ++ ctx = c ++ } ++ + for { + err := call(ctx, settings) + if err == nil { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go +index 138f7c12f0e..c056bd3058a 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go +@@ -1,10 +1,10 @@ ++//go:build gofuzz + // +build gofuzz + + package httprule + + func Fuzz(data []byte) int { +- _, err := Parse(string(data)) +- if err != nil { ++ if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go +index 5edd784e62a..65ffcf5cf87 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go +@@ -1,6 +1,7 @@ + package httprule + + import ( ++ "errors" + "fmt" + "strings" + ) +@@ -164,9 +165,9 @@ func (p *parser) segment() (segment, error) { + + v, err := p.variable() + if err != nil { +- return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err) ++ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err) + } +- return v, err ++ return v, nil + } + + func (p *parser) literal() (segment, error) { +@@ -191,7 +192,7 @@ func (p *parser) variable() (segment, error) { + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { +- return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err) ++ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err) + } + } else { + segs = []segment{wildcard{}} +@@ -213,12 +214,12 @@ func (p *parser) fieldPath() (string, error) { + } + components := []string{c} + for { +- if _, err = p.accept("."); err != nil { ++ if _, err := p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { +- return "", fmt.Errorf("invalid field path component: %v", err) ++ return "", fmt.Errorf("invalid field path component: %w", err) + } + components = append(components, c) + } +@@ -237,10 +238,8 @@ const ( + typeEOF = termType("$") + ) + +-const ( +- // eof is the terminal symbol which always appears at the end of token sequence. +- eof = "\u0000" +-) ++// eof is the terminal symbol which always appears at the end of token sequence. ++const eof = "\u0000" + + // accept tries to accept a token in "p". + // This function consumes a token and returns it if it matches to the specified "term". +@@ -275,11 +274,12 @@ func (p *parser) accept(term termType) (string, error) { + // expectPChars determines if "t" consists of only pchars defined in RFC3986. + // + // https://www.ietf.org/rfc/rfc3986.txt, P.49 +-// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +-// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +-// sub-delims = "!" 
/ "$" / "&" / "'" / "(" / ")" +-// / "*" / "+" / "," / ";" / "=" +-// pct-encoded = "%" HEXDIG HEXDIG ++// ++// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" ++// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" ++// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" ++// / "*" / "+" / "," / ";" / "=" ++// pct-encoded = "%" HEXDIG HEXDIG + func expectPChars(t string) error { + const ( + init = iota +@@ -333,7 +333,7 @@ func expectPChars(t string) error { + // expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). + func expectIdent(ident string) error { + if ident == "" { +- return fmt.Errorf("empty identifier") ++ return errors.New("empty identifier") + } + for pos, r := range ident { + switch { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +index 95f867a5286..a8789f17022 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +@@ -27,9 +27,9 @@ go_library( + "//internal/httprule", + "//utilities", + "@go_googleapis//google/api:httpbody_go_proto", +- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", ++ "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", +@@ -37,6 +37,8 @@ go_library( + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", ++ "@org_golang_google_protobuf//types/known/fieldmaskpb", ++ "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +@@ -56,8 +58,10 @@ go_test( + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", ++ "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", ++ "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], +@@ -69,8 +73,9 @@ go_test( + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@go_googleapis//google/rpc:status_go_proto", +- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", ++ "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", ++ "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", +@@ -78,6 +83,7 @@ go_test( + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", ++ "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +index fb57b9366ea..31553e7848a 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +@@ -13,6 +13,7 @@ import ( + "time" + + 
"google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + ) +@@ -35,11 +36,15 @@ const metadataHeaderBinarySuffix = "-Bin" + const xForwardedFor = "X-Forwarded-For" + const xForwardedHost = "X-Forwarded-Host" + +-var ( +- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +- // header isn't present. If the value is 0 the sent `context` will not have a timeout. +- DefaultContextTimeout = 0 * time.Second +-) ++// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound ++// header isn't present. If the value is 0 the sent `context` will not have a timeout. ++var DefaultContextTimeout = 0 * time.Second ++ ++// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. ++// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. ++var malformedHTTPHeaders = map[string]struct{}{ ++ "connection": {}, ++} + + type ( + rpcMethodKey struct{} +@@ -95,12 +100,43 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque + return metadata.NewIncomingContext(ctx, md), nil + } + ++func isValidGRPCMetadataKey(key string) bool { ++ // Must be a valid gRPC "Header-Name" as defined here: ++ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md ++ // This means 0-9 a-z _ - . ++ // Only lowercase letters are valid in the wire protocol, but the client library will normalize ++ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. ++ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. ++ for _, ch := range bytes { ++ validLowercaseLetter := ch >= 'a' && ch <= 'z' ++ validUppercaseLetter := ch >= 'A' && ch <= 'Z' ++ validDigit := ch >= '0' && ch <= '9' ++ validOther := ch == '.' || ch == '-' || ch == '_' ++ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { ++ return false ++ } ++ } ++ return true ++} ++ ++func isValidGRPCMetadataTextValue(textValue string) bool { ++ // Must be a valid gRPC "ASCII-Value" as defined here: ++ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md ++ // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. ++ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. 
++ for _, ch := range bytes { ++ if ch < 0x20 || ch > 0x7E { ++ return false ++ } ++ } ++ return true ++} ++ + func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } +- var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error +@@ -109,7 +145,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } +- ++ var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + for _, val := range vals { +@@ -118,6 +154,10 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { ++ if !isValidGRPCMetadataKey(h) { ++ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) ++ continue ++ } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { +@@ -127,6 +167,9 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + } + + val = string(b) ++ } else if !isValidGRPCMetadataTextValue(val) { ++ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) ++ continue + } + pairs = append(pairs, h, val) + } +@@ -172,11 +215,17 @@ type serverMetadataKey struct{} + + // NewServerMetadataContext creates a new context with ServerMetadata + func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { ++ if ctx == nil { ++ ctx = context.Background() ++ } + return context.WithValue(ctx, serverMetadataKey{}, md) + } + + // ServerMetadataFromContext returns the ServerMetadata in ctx + func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { ++ if ctx == nil { ++ return md, false ++ } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return + } +@@ -269,8 +318,8 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + case 'n': + return time.Nanosecond, true + default: ++ return + } +- return + } + + // isPermanentHTTPHeader checks whether hdr belongs to the list of +@@ -308,6 +357,13 @@ func isPermanentHTTPHeader(hdr string) bool { + return false + } + ++// isMalformedHTTPHeader checks whether header belongs to the list of ++// "malformed headers" and would be rejected by the gRPC server. ++func isMalformedHTTPHeader(header string) bool { ++ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] ++ return isMalformed ++} ++ + // RPCMethod returns the method string for the server context. The returned + // string is in the format of "/package.service/method". 
+ func RPCMethod(ctx context.Context) (string, bool) { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +index e6bc4e6ceec..d7b15fcfb3f 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +@@ -37,7 +37,7 @@ func BoolSlice(val, sep string) ([]bool, error) { + for i, v := range s { + value, err := Bool(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -57,7 +57,7 @@ func Float64Slice(val, sep string) ([]float64, error) { + for i, v := range s { + value, err := Float64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -81,7 +81,7 @@ func Float32Slice(val, sep string) ([]float32, error) { + for i, v := range s { + value, err := Float32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -101,7 +101,7 @@ func Int64Slice(val, sep string) ([]int64, error) { + for i, v := range s { + value, err := Int64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -125,7 +125,7 @@ func Int32Slice(val, sep string) ([]int32, error) { + for i, v := range s { + value, err := Int32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -145,7 +145,7 @@ func Uint64Slice(val, sep string) ([]uint64, error) { + for i, v := range s { + value, err := Uint64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -169,7 +169,7 @@ func Uint32Slice(val, sep string) ([]uint32, error) { + for i, v := range s { + value, err := Uint32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -197,7 +197,7 @@ func BytesSlice(val, sep string) ([][]byte, error) { + for i, v := range s { + value, err := Bytes(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -209,8 +209,7 @@ func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} +- err := unmarshaler.Unmarshal([]byte(val), &r) +- if err != nil { ++ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +@@ -221,8 +220,7 @@ func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} +- err := unmarshaler.Unmarshal([]byte(val), &r) +- if err != nil { ++ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +@@ -257,66 +255,64 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } + return values, nil + } + +-/* +- Support fot google.protobuf.wrappers on top of primitive types +-*/ ++// Support for google.protobuf.wrappers on top of primitive types + + // StringValue well-known type support as wrapper around string type + func StringValue(val string) (*wrapperspb.StringValue, error) { +- return &wrapperspb.StringValue{Value: val}, nil ++ return wrapperspb.String(val), nil + } + + // FloatValue well-known type support as wrapper around float32 type 
+ func FloatValue(val string) (*wrapperspb.FloatValue, error) { + parsedVal, err := Float32(val) +- return &wrapperspb.FloatValue{Value: parsedVal}, err ++ return wrapperspb.Float(parsedVal), err + } + + // DoubleValue well-known type support as wrapper around float64 type + func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { + parsedVal, err := Float64(val) +- return &wrapperspb.DoubleValue{Value: parsedVal}, err ++ return wrapperspb.Double(parsedVal), err + } + + // BoolValue well-known type support as wrapper around bool type + func BoolValue(val string) (*wrapperspb.BoolValue, error) { + parsedVal, err := Bool(val) +- return &wrapperspb.BoolValue{Value: parsedVal}, err ++ return wrapperspb.Bool(parsedVal), err + } + + // Int32Value well-known type support as wrapper around int32 type + func Int32Value(val string) (*wrapperspb.Int32Value, error) { + parsedVal, err := Int32(val) +- return &wrapperspb.Int32Value{Value: parsedVal}, err ++ return wrapperspb.Int32(parsedVal), err + } + + // UInt32Value well-known type support as wrapper around uint32 type + func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { + parsedVal, err := Uint32(val) +- return &wrapperspb.UInt32Value{Value: parsedVal}, err ++ return wrapperspb.UInt32(parsedVal), err + } + + // Int64Value well-known type support as wrapper around int64 type + func Int64Value(val string) (*wrapperspb.Int64Value, error) { + parsedVal, err := Int64(val) +- return &wrapperspb.Int64Value{Value: parsedVal}, err ++ return wrapperspb.Int64(parsedVal), err + } + + // UInt64Value well-known type support as wrapper around uint64 type + func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { + parsedVal, err := Uint64(val) +- return &wrapperspb.UInt64Value{Value: parsedVal}, err ++ return wrapperspb.UInt64(parsedVal), err + } + + // BytesValue well-known type support as wrapper around bytes[] type + func BytesValue(val string) (*wrapperspb.BytesValue, error) { + parsedVal, err := Bytes(val) +- return &wrapperspb.BytesValue{Value: parsedVal}, err ++ return wrapperspb.Bytes(parsedVal), err + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +index d9e0013c439..d2bcbb7d2a2 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +@@ -38,7 +38,7 @@ func HTTPStatusFromCode(code codes.Code) int { + case codes.OK: + return http.StatusOK + case codes.Canceled: +- return http.StatusRequestTimeout ++ return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: +@@ -70,10 +70,10 @@ func HTTPStatusFromCode(code codes.Code) int { + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError ++ default: ++ grpclog.Infof("Unknown gRPC error code: %v", code) ++ return http.StatusInternalServerError + } +- +- grpclog.Infof("Unknown gRPC error code: %v", code) +- return http.StatusInternalServerError + } + + // HTTPError uses the mux-configured error handler. +@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + + // DefaultRoutingErrorHandler is our default handler for routing errors. 
+ // By default http error codes mapped on the following error codes: +-// NotFound -> grpc.NotFound +-// StatusBadRequest -> grpc.InvalidArgument +-// MethodNotAllowed -> grpc.Unimplemented +-// Other -> grpc.Internal, method is not expecting to be called for anything else ++// ++// NotFound -> grpc.NotFound ++// StatusBadRequest -> grpc.InvalidArgument ++// MethodNotAllowed -> grpc.Unimplemented ++// Other -> grpc.Internal, method is not expecting to be called for anything else + func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +index 0138ed2f769..a03dd166bd7 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +@@ -2,13 +2,14 @@ package runtime + + import ( + "encoding/json" ++ "errors" + "fmt" + "io" + "sort" + +- "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" ++ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" + ) + + func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { +@@ -44,7 +45,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { +- return nil, fmt.Errorf("JSON structure did not match request type") ++ return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) +@@ -53,7 +54,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + } + + if isDynamicProtoMessage(fd.Message()) { +- for _, p := range buildPathsBlindly(k, v) { ++ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." 
+ newPath +@@ -63,7 +64,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + continue + } + +- if isProtobufAnyMessage(fd.Message()) { ++ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +index d1e21df4810..945f3a5ebf3 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal + return + } + if err != nil { +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + +@@ -82,15 +82,15 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal + + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } +- if _, err = w.Write(buf); err != nil { ++ if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true +- if _, err = w.Write(delimiter); err != nil { ++ if _, err := w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } +@@ -200,20 +200,24 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re + return nil + } + +-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { ++func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } +- buf, merr := marshaler.Marshal(msg) +- if merr != nil { +- grpclog.Infof("Failed to marshal an error: %v", merr) ++ buf, err := marshaler.Marshal(msg) ++ if err != nil { ++ grpclog.Infof("Failed to marshal an error: %v", err) ++ return ++ } ++ if _, err := w.Write(buf); err != nil { ++ grpclog.Infof("Failed to notify error to client: %v", err) + return + } +- if _, werr := w.Write(buf); werr != nil { +- grpclog.Infof("Failed to notify error to client: %v", werr) ++ if _, err := w.Write(delimiter); err != nil { ++ grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +index 7387c8e3976..51b8247da2a 100644 +--- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +@@ -92,23 +92,20 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer +- err := buf.WriteByte('[') +- if err != nil { ++ if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { +- err = buf.WriteByte(',') +- if err != nil { ++ if err := buf.WriteByte(','); err != nil { + return nil, err + } + } +- if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { ++ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } +- err = buf.WriteByte(']') +- if err != nil { ++ if err := buf.WriteByte(']'); err != nil { + return nil, err + } + +@@ -117,17 +114,16 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer +- err := buf.WriteByte('[') +- if err != nil { ++ if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { +- err = buf.WriteByte(',') +- if err != nil { ++ if err := buf.WriteByte(','); err != nil { + return nil, err + } + } ++ var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { +@@ -137,8 +133,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + return nil, err + } + } +- err = buf.WriteByte(']') +- if err != nil { ++ if err := buf.WriteByte(']'); err != nil { + return nil, err + } + +@@ -219,8 +214,7 @@ func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v int + + // Decode into bytes for marshalling + var b json.RawMessage +- err := d.Decode(&b) +- if err != nil { ++ if err := d.Decode(&b); err != nil { + return err + } + +@@ -239,8 +233,7 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage +- err := d.Decode(&b) +- if err != nil { ++ if err := d.Decode(&b); err != nil { + return err + } + +@@ -280,6 +273,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions + return nil + } + if rv.Kind() == reflect.Slice { ++ if rv.Type().Elem().Kind() == reflect.Uint8 { ++ var sl []byte ++ if err := d.Decode(&sl); err != nil { ++ return err ++ } ++ if sl != nil { ++ rv.SetBytes(sl) ++ } ++ return nil ++ } ++ + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go +index 007f8f1a2c7..398c780dc22 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go +@@ -1,10 +1,8 @@ + package runtime + + import ( +- "io" +- + "errors" +- "io/ioutil" ++ "io" + + "google.golang.org/protobuf/proto" + ) +@@ -38,7 +36,7 @@ func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + // NewDecoder returns a Decoder which reads proto stream from "reader". 
+ func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { +- buffer, err := ioutil.ReadAll(reader) ++ buffer, err := io.ReadAll(reader) + if err != nil { + return err + } +@@ -53,8 +51,7 @@ func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + if err != nil { + return err + } +- _, err = writer.Write(buffer) +- if err != nil { ++ if _, err := writer.Write(buffer); err != nil { + return err + } + +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +index 46a4aabaf95..f451cb441f4 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +@@ -6,10 +6,13 @@ import ( + "fmt" + "net/http" + "net/textproto" ++ "regexp" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++ "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +@@ -23,15 +26,15 @@ const ( + // path string before doing any routing. + UnescapingModeLegacy UnescapingMode = iota + +- // EscapingTypeExceptReserved unescapes all path parameters except RFC 6570 ++ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570 + // reserved characters. + UnescapingModeAllExceptReserved + +- // EscapingTypeExceptSlash unescapes URL path parameters except path +- // seperators, which will be left as "%2F". ++ // UnescapingModeAllExceptSlash unescapes URL path parameters except path ++ // separators, which will be left as "%2F". + UnescapingModeAllExceptSlash + +- // URL path parameters will be fully decoded. ++ // UnescapingModeAllCharacters unescapes all URL path parameters. + UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. +@@ -40,6 +43,8 @@ const ( + UnescapingModeDefault = UnescapingModeLegacy + ) + ++var encodedPathSplitter = regexp.MustCompile("(/|%2F)") ++ + // A HandlerFunc handles a specific pair of path pattern and HTTP method. + type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +@@ -75,7 +80,7 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. + } + } + +-// WithEscapingType sets the escaping type. See the definitions of UnescapingMode ++// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode + // for more information. + func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { +@@ -96,13 +101,14 @@ func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMux + type HeaderMatcherFunc func(string) (string, bool) + + // DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. ++// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function. 
++// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'. ++// Other headers are not added to the gRPC metadata. + func DefaultHeaderMatcher(key string) (string, bool) { +- key = textproto.CanonicalMIMEHeaderKey(key) +- if isPermanentHTTPHeader(key) { ++ switch key = textproto.CanonicalMIMEHeaderKey(key); { ++ case isPermanentHTTPHeader(key): + return MetadataPrefix + key, true +- } else if strings.HasPrefix(key, MetadataHeaderPrefix) { ++ case strings.HasPrefix(key, MetadataHeaderPrefix): + return key[len(MetadataHeaderPrefix):], true + } + return "", false +@@ -113,11 +119,30 @@ func DefaultHeaderMatcher(key string) (string, bool) { + // This matcher will be called with each header in http.Request. If matcher returns true, that header will be + // passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. + func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { ++ for _, header := range fn.matchedMalformedHeaders() { ++ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) ++ } ++ + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } + } + ++// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. ++func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string { ++ if fn == nil { ++ return nil ++ } ++ headers := make([]string, 0) ++ for header := range malformedHTTPHeaders { ++ out, accept := fn(header) ++ if accept && isMalformedHTTPHeader(out) { ++ headers = append(headers, out) ++ } ++ } ++ return headers ++} ++ + // WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. + // + // This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +@@ -179,6 +204,56 @@ func WithDisablePathLengthFallback() ServeMuxOption { + } + } + ++// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. ++// When called the handler will forward the request to the upstream grpc service health check (defined in the ++// gRPC Health Checking Protocol). ++// ++// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how ++// to setup the protocol in the grpc server. ++// ++// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. 
++func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { ++ return func(s *ServeMux) { ++ // error can be ignored since pattern is definitely valid ++ _ = s.HandlePath( ++ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ++ ) { ++ _, outboundMarshaler := MarshalerForRequest(s, r) ++ ++ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ ++ Service: r.URL.Query().Get("service"), ++ }) ++ if err != nil { ++ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) ++ return ++ } ++ ++ w.Header().Set("Content-Type", "application/json") ++ ++ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { ++ switch resp.GetStatus() { ++ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: ++ err = status.Error(codes.Unavailable, resp.String()) ++ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: ++ err = status.Error(codes.NotFound, resp.String()) ++ } ++ ++ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) ++ return ++ } ++ ++ _ = outboundMarshaler.NewEncoder(w).Encode(resp) ++ }) ++ } ++} ++ ++// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. ++// ++// See WithHealthEndpointAt for the general implementation. ++func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { ++ return WithHealthEndpointAt(healthCheckClient, "/healthz") ++} ++ + // NewServeMux returns a new ServeMux whose internal mapping is empty. + func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ +@@ -229,7 +304,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er + return nil + } + +-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. ++// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. + func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + +@@ -245,8 +320,6 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path = r.URL.RawPath + } + +- components := strings.Split(path[1:], "/") +- + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { +@@ -257,8 +330,18 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- // Verb out here is to memoize for the fallback case below +- var verb string ++ var pathComponents []string ++ // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" ++ // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the ++ // path is the RawPath (i.e. unescaped). 
That does mean that the behavior of this function will change its default ++ // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved ++ if s.unescapingMode == UnescapingModeAllCharacters { ++ pathComponents = encodedPathSplitter.Split(path[1:], -1) ++ } else { ++ pathComponents = strings.Split(path[1:], "/") ++ } ++ ++ lastPathComponent := pathComponents[len(pathComponents)-1] + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last +@@ -269,23 +352,28 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. ++ ++ var verb string + patVerb := h.pat.Verb() +- l := len(components) +- lastComponent := components[l-1] +- var idx int = -1 +- if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) { +- idx = len(lastComponent) - len(patVerb) - 1 ++ ++ idx := -1 ++ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { ++ idx = len(lastPathComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } ++ ++ comps := make([]string, len(pathComponents)) ++ copy(comps, pathComponents) ++ + if idx > 0 { +- components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:] ++ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + +- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) ++ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { +@@ -301,14 +389,33 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + return + } + +- // lookup other methods to handle fallback from GET to POST and +- // to determine if it is NotImplemented or NotFound. ++ // if no handler has found for the request, lookup for other methods ++ // to handle POST -> GET fallback if the request is subject to path ++ // length fallback. ++ // Note we are not eagerly checking the request here as we want to return the ++ // right HTTP status code, and we need to process the fallback candidates in ++ // order to do that. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { +- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) ++ var verb string ++ patVerb := h.pat.Verb() ++ ++ idx := -1 ++ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { ++ idx = len(lastPathComponent) - len(patVerb) - 1 ++ } ++ ++ comps := make([]string, len(pathComponents)) ++ copy(comps, pathComponents) ++ ++ if idx > 0 { ++ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] ++ } ++ ++ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { +@@ -320,8 +427,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + continue + } ++ + // X-HTTP-Method-Override is optional. Always allow fallback to POST. +- if s.isPathLengthFallback(r) { ++ // Also, only consider POST -> GET fallbacks, and avoid falling back to ++ // potentially dangerous operations like DELETE. 
++ if s.isPathLengthFallback(r) && m == http.MethodGet { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +index df7cb81426a..8f90d15a562 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +@@ -15,8 +15,6 @@ var ( + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +- // ErrMalformedSequence indicates that an escape sequence was malformed. +- ErrMalformedSequence = errors.New("malformed escape sequence") + ) + + type MalformedSequenceError string +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +index fb0c84ef0cd..d01933c4fd2 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +@@ -1,7 +1,6 @@ + package runtime + + import ( +- "encoding/base64" + "errors" + "fmt" + "net/url" +@@ -11,19 +10,21 @@ import ( + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +- "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc/grpclog" ++ "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" ++ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" ++ "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + ) + + var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +-var currentQueryParser QueryParameterParser = &defaultQueryParser{} ++var currentQueryParser QueryParameterParser = &DefaultQueryParser{} + + // QueryParameterParser defines interface for all query parameter parsers + type QueryParameterParser interface { +@@ -36,14 +37,17 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili + return currentQueryParser.Parse(msg, values, filter) + } + +-type defaultQueryParser struct{} ++// DefaultQueryParser is a QueryParameterParser which implements the default ++// query parameters parsing behavior. ++// ++// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context. ++type DefaultQueryParser struct{} + + // Parse populates "values" into "msg". + // A value is ignored if its key starts with one of the elements in "filter". +-func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { ++func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { +- match := valuesKeyRegexp.FindStringSubmatch(key) +- if len(match) == 3 { ++ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) 
+ } +@@ -175,10 +179,10 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) +- switch { +- case errors.Is(err, protoregistry.NotFound): +- return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) +- case err != nil: ++ if err != nil { ++ if errors.Is(err, protoregistry.NotFound) { ++ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) ++ } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name +@@ -189,8 +193,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number +- v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)) +- if v == nil { ++ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } +@@ -234,7 +237,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: +- v, err := base64.URLEncoding.DecodeString(value) ++ v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } +@@ -250,18 +253,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": +- if value == "null" { +- break +- } + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": +- if value == "null" { +- break +- } + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err +@@ -272,55 +269,67 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.DoubleValue{Value: v} ++ msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.FloatValue{Value: float32(v)} ++ msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.Int64Value{Value: v} ++ msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.Int32Value{Value: int32(v)} ++ msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.UInt64Value{Value: v} ++ msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.UInt32Value{Value: uint32(v)} ++ msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err 
+ } +- msg = &wrapperspb.BoolValue{Value: v} ++ msg = wrapperspb.Bool(v) + case "google.protobuf.StringValue": +- msg = &wrapperspb.StringValue{Value: value} ++ msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": +- v, err := base64.URLEncoding.DecodeString(value) ++ v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.BytesValue{Value: v} ++ msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm ++ case "google.protobuf.Value": ++ var v structpb.Value ++ if err := protojson.Unmarshal([]byte(value), &v); err != nil { ++ return protoreflect.Value{}, err ++ } ++ msg = &v ++ case "google.protobuf.Struct": ++ var v structpb.Struct ++ if err := protojson.Unmarshal([]byte(value), &v); err != nil { ++ return protoreflect.Value{}, err ++ } ++ msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +index 5d8d12bc421..b8940946577 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +@@ -8,6 +8,7 @@ go_library( + "doc.go", + "pattern.go", + "readerfactory.go", ++ "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +@@ -16,7 +17,10 @@ go_library( + go_test( + name = "utilities_test", + size = "small", +- srcs = ["trie_test.go"], ++ srcs = [ ++ "string_array_flag_test.go", ++ "trie_test.go", ++ ], + deps = [":utilities"], + ) + +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go +index 6dd3854665f..01d26edae3c 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go +@@ -3,13 +3,12 @@ package utilities + import ( + "bytes" + "io" +- "io/ioutil" + ) + + // IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins + // at the start of the stream + func IOReaderFactory(r io.Reader) (func() io.Reader, error) { +- b, err := ioutil.ReadAll(r) ++ b, err := io.ReadAll(r) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +new file mode 100644 +index 00000000000..d224ab776c0 +--- /dev/null ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +@@ -0,0 +1,33 @@ ++package utilities ++ ++import ( ++ "flag" ++ "strings" ++) ++ ++// flagInterface is an cut down interface to `flag` ++type flagInterface interface { ++ Var(value flag.Value, name string, usage string) ++} ++ ++// StringArrayFlag defines a flag with the specified name and usage string. ++// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag. 
++func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags { ++ value := &StringArrayFlags{} ++ f.Var(value, name, usage) ++ return value ++} ++ ++// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var` ++type StringArrayFlags []string ++ ++// String returns a string representation of `StringArrayFlags` ++func (i *StringArrayFlags) String() string { ++ return strings.Join(*i, ",") ++} ++ ++// Set appends a value to `StringArrayFlags` ++func (i *StringArrayFlags) Set(value string) error { ++ *i = append(*i, value) ++ return nil ++} +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go +index af3b703d505..dd99b0ed256 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go +@@ -40,7 +40,7 @@ func NewDoubleArray(seqs [][]string) *DoubleArray { + func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { +- var encoded []int ++ encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go +index 95d8e59da69..b774da88d86 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go +@@ -352,9 +352,9 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + + // Greater asserts that the first element is greater than the second + // +-// assert.Greater(t, 2, 1) +-// assert.Greater(t, float64(2), float64(1)) +-// assert.Greater(t, "b", "a") ++// assert.Greater(t, 2, 1) ++// assert.Greater(t, float64(2), float64(1)) ++// assert.Greater(t, "b", "a") + func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -364,10 +364,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqual(t, 2, 1) +-// assert.GreaterOrEqual(t, 2, 2) +-// assert.GreaterOrEqual(t, "b", "a") +-// assert.GreaterOrEqual(t, "b", "b") ++// assert.GreaterOrEqual(t, 2, 1) ++// assert.GreaterOrEqual(t, 2, 2) ++// assert.GreaterOrEqual(t, "b", "a") ++// assert.GreaterOrEqual(t, "b", "b") + func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -377,9 +377,9 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in + + // Less asserts that the first element is less than the second + // +-// assert.Less(t, 1, 2) +-// assert.Less(t, float64(1), float64(2)) +-// assert.Less(t, "a", "b") ++// assert.Less(t, 1, 2) ++// assert.Less(t, float64(1), float64(2)) ++// assert.Less(t, "a", "b") + func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -389,10 +389,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqual(t, 1, 2) +-// 
assert.LessOrEqual(t, 2, 2) +-// assert.LessOrEqual(t, "a", "b") +-// assert.LessOrEqual(t, "b", "b") ++// assert.LessOrEqual(t, 1, 2) ++// assert.LessOrEqual(t, 2, 2) ++// assert.LessOrEqual(t, "a", "b") ++// assert.LessOrEqual(t, "b", "b") + func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -402,8 +402,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // Positive asserts that the specified element is positive + // +-// assert.Positive(t, 1) +-// assert.Positive(t, 1.23) ++// assert.Positive(t, 1) ++// assert.Positive(t, 1.23) + func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -414,8 +414,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + + // Negative asserts that the specified element is negative + // +-// assert.Negative(t, -1) +-// assert.Negative(t, -1.23) ++// assert.Negative(t, -1) ++// assert.Negative(t, -1.23) + func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go +index 7880b8f9433..84dbd6c790b 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go +@@ -22,9 +22,9 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") ++// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") ++// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") ++// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") + func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -56,7 +56,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Emptyf(t, obj, "error message %s", "formatted") ++// assert.Emptyf(t, obj, "error message %s", "formatted") + func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -66,7 +66,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo + + // Equalf asserts that two objects are equal. + // +-// assert.Equalf(t, 123, 123, "error message %s", "formatted") ++// assert.Equalf(t, 123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -81,8 +81,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar + // EqualErrorf asserts that a function returned an error (i.e. 
not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") + func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -90,10 +90,27 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) + } + ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) ++} ++ + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") ++// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") + func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -103,10 +120,10 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.Errorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Errorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -126,8 +143,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") + func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -147,7 +164,7 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. 
+ // +-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -155,9 +172,34 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) + } + ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) ++} ++ + // Exactlyf asserts that two objects are equal in value and type. + // +-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") ++// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") + func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -183,7 +225,7 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{} + + // Falsef asserts that the specified value is false. 
+ // +-// assert.Falsef(t, myBool, "error message %s", "formatted") ++// assert.Falsef(t, myBool, "error message %s", "formatted") + func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -202,9 +244,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool + + // Greaterf asserts that the first element is greater than the second + // +-// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +-// assert.Greaterf(t, "b", "a", "error message %s", "formatted") ++// assert.Greaterf(t, 2, 1, "error message %s", "formatted") ++// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") ++// assert.Greaterf(t, "b", "a", "error message %s", "formatted") + func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -214,10 +256,10 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") + func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -228,7 +270,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -241,7 +283,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -253,7 +295,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u + + // HTTPErrorf asserts that a specified handler returns an error status code. 
+ // +-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -265,7 +307,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -277,7 +319,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { +@@ -289,7 +331,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -301,7 +343,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -311,7 +353,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms + + // InDeltaf asserts that the two numerals are within delta of each other. 
+ // +-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -353,9 +395,9 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil + + // IsDecreasingf asserts that the collection is decreasing + // +-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -365,9 +407,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsIncreasingf asserts that the collection is increasing + // +-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -377,9 +419,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -389,9 +431,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -409,7 +451,7 @@ func IsTypef(t 
TestingT, expectedType interface{}, object interface{}, msg strin + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -420,7 +462,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") ++// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") + func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -430,9 +472,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf + + // Lessf asserts that the first element is less than the second + // +-// assert.Lessf(t, 1, 2, "error message %s", "formatted") +-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +-// assert.Lessf(t, "a", "b", "error message %s", "formatted") ++// assert.Lessf(t, 1, 2, "error message %s", "formatted") ++// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") ++// assert.Lessf(t, "a", "b", "error message %s", "formatted") + func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -442,10 +484,10 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") + func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -455,8 +497,8 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . + + // Negativef asserts that the specified element is negative + // +-// assert.Negativef(t, -1, "error message %s", "formatted") +-// assert.Negativef(t, -1.23, "error message %s", "formatted") ++// assert.Negativef(t, -1, "error message %s", "formatted") ++// assert.Negativef(t, -1.23, "error message %s", "formatted") + func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -467,7 +509,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. 
+ // +-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -477,7 +519,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. + + // Nilf asserts that the specified object is nil. + // +-// assert.Nilf(t, err, "error message %s", "formatted") ++// assert.Nilf(t, err, "error message %s", "formatted") + func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -496,10 +538,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoErrorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoErrorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -519,9 +561,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) boo + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") + func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -532,9 +574,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -544,7 +586,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) + + // NotEqualf asserts that the specified values are NOT equal. + // +-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). 
+@@ -557,7 +599,7 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") + func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -576,7 +618,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf + + // NotNilf asserts that the specified object is not nil. + // +-// assert.NotNilf(t, err, "error message %s", "formatted") ++// assert.NotNilf(t, err, "error message %s", "formatted") + func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -586,7 +628,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") ++// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") + func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -596,8 +638,8 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") + func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -607,7 +649,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. + + // NotSamef asserts that two pointers do not reference the same object. + // +-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -621,7 +663,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -639,7 +681,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") + func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -651,7 +693,7 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -662,7 +704,7 @@ func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -672,8 +714,8 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str + + // Positivef asserts that the specified element is positive + // +-// assert.Positivef(t, 1, "error message %s", "formatted") +-// assert.Positivef(t, 1.23, "error message %s", "formatted") ++// assert.Positivef(t, 1, "error message %s", "formatted") ++// assert.Positivef(t, 1.23, "error message %s", "formatted") + func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -683,8 +725,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool + + // Regexpf asserts that a specified regexp matches a string. + // +-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") ++// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") + func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -694,7 +736,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in + + // Samef asserts that two pointers reference the same object. + // +-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -708,7 +750,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -718,7 +760,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args + + // Truef asserts that the specified value is true. + // +-// assert.Truef(t, myBool, "error message %s", "formatted") ++// assert.Truef(t, myBool, "error message %s", "formatted") + func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -728,7 +770,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -738,7 +780,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim + + // WithinRangef asserts that a time is within a time range (inclusive). + // +-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go +index 339515b8bfb..b1d94aec53c 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go +@@ -30,9 +30,9 @@ func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{} + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Contains("Hello World", "World") +-// a.Contains(["Hello", "World"], "World") +-// a.Contains({"Hello": "World"}, "Hello") ++// a.Contains("Hello World", "World") ++// a.Contains(["Hello", "World"], "World") ++// a.Contains({"Hello": "World"}, "Hello") + func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -43,9 +43,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. 
+ // +-// a.Containsf("Hello World", "World", "error message %s", "formatted") +-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") ++// a.Containsf("Hello World", "World", "error message %s", "formatted") ++// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") ++// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") + func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -98,7 +98,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Empty(obj) ++// a.Empty(obj) + func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -109,7 +109,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Emptyf(obj, "error message %s", "formatted") ++// a.Emptyf(obj, "error message %s", "formatted") + func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -119,7 +119,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) + + // Equal asserts that two objects are equal. + // +-// a.Equal(123, 123) ++// a.Equal(123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -134,8 +134,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualError(err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// a.EqualError(err, expectedErrorString) + func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -146,8 +146,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") + func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -155,10 +155,44 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a + return EqualErrorf(a.t, theError, errString, msg, args...) + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true ++// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false ++func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValues(a.t, expected, actual, msgAndArgs...) ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValuesf(a.t, expected, actual, msg, args...) ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValues(uint32(123), int32(123)) ++// a.EqualValues(uint32(123), int32(123)) + func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -169,7 +203,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") ++// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") + func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -179,7 +213,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg + + // Equalf asserts that two objects are equal. + // +-// a.Equalf(123, 123, "error message %s", "formatted") ++// a.Equalf(123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -193,10 +227,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Error(err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Error(err) { ++// assert.Equal(t, expectedError, err) ++// } + func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -225,8 +259,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. 
+ // +-// actualObj, err := SomeFunction() +-// a.ErrorContains(err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// a.ErrorContains(err, expectedErrorSubString) + func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -237,8 +271,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") + func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -266,10 +300,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Errorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Errorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -280,7 +314,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) ++// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -288,10 +322,60 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. 
++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithT(func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -301,7 +385,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t + + // Exactly asserts that two objects are equal in value and type. + // +-// a.Exactly(int32(123), int64(123)) ++// a.Exactly(int32(123), int64(123)) + func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -311,7 +395,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg + + // Exactlyf asserts that two objects are equal in value and type. 
+ // +-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") ++// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") + func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -353,7 +437,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ + + // False asserts that the specified value is false. + // +-// a.False(myBool) ++// a.False(myBool) + func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -363,7 +447,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + + // Falsef asserts that the specified value is false. + // +-// a.Falsef(myBool, "error message %s", "formatted") ++// a.Falsef(myBool, "error message %s", "formatted") + func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -391,9 +475,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b + + // Greater asserts that the first element is greater than the second + // +-// a.Greater(2, 1) +-// a.Greater(float64(2), float64(1)) +-// a.Greater("b", "a") ++// a.Greater(2, 1) ++// a.Greater(float64(2), float64(1)) ++// a.Greater("b", "a") + func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -403,10 +487,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqual(2, 1) +-// a.GreaterOrEqual(2, 2) +-// a.GreaterOrEqual("b", "a") +-// a.GreaterOrEqual("b", "b") ++// a.GreaterOrEqual(2, 1) ++// a.GreaterOrEqual(2, 2) ++// a.GreaterOrEqual("b", "a") ++// a.GreaterOrEqual("b", "b") + func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -416,10 +500,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . 
+ + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -429,9 +513,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, + + // Greaterf asserts that the first element is greater than the second + // +-// a.Greaterf(2, 1, "error message %s", "formatted") +-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +-// a.Greaterf("b", "a", "error message %s", "formatted") ++// a.Greaterf(2, 1, "error message %s", "formatted") ++// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") ++// a.Greaterf("b", "a", "error message %s", "formatted") + func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -442,7 +526,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -455,7 +539,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -468,7 +552,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). 
+ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -481,7 +565,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -493,7 +577,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin + + // HTTPError asserts that a specified handler returns an error status code. + // +-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -505,7 +589,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -517,7 +601,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str + + // HTTPRedirect asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -529,7 +613,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -541,7 +625,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url + + // HTTPStatusCode asserts that a specified handler returns a specified status code. 
+ // +-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) ++// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { +@@ -553,7 +637,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { +@@ -565,7 +649,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) ++// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -577,7 +661,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -589,7 +673,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s + + // Implements asserts that an object is implemented by the specified interface. + // +-// a.Implements((*MyInterface)(nil), new(MyObject)) ++// a.Implements((*MyInterface)(nil), new(MyObject)) + func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -599,7 +683,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -609,7 +693,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} + + // InDelta asserts that the two numerals are within delta of each other. 
+ // +-// a.InDelta(math.Pi, 22/7.0, 0.01) ++// a.InDelta(math.Pi, 22/7.0, 0.01) + func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -651,7 +735,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del + + // InDeltaf asserts that the two numerals are within delta of each other. + // +-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -693,9 +777,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo + + // IsDecreasing asserts that the collection is decreasing + // +-// a.IsDecreasing([]int{2, 1, 0}) +-// a.IsDecreasing([]float{2, 1}) +-// a.IsDecreasing([]string{"b", "a"}) ++// a.IsDecreasing([]int{2, 1, 0}) ++// a.IsDecreasing([]float{2, 1}) ++// a.IsDecreasing([]string{"b", "a"}) + func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -705,9 +789,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) + + // IsDecreasingf asserts that the collection is decreasing + // +-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") ++// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -717,9 +801,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter + + // IsIncreasing asserts that the collection is increasing + // +-// a.IsIncreasing([]int{1, 2, 3}) +-// a.IsIncreasing([]float{1, 2}) +-// a.IsIncreasing([]string{"a", "b"}) ++// a.IsIncreasing([]int{1, 2, 3}) ++// a.IsIncreasing([]float{1, 2}) ++// a.IsIncreasing([]string{"a", "b"}) + func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -729,9 +813,9 @@ func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) + + // IsIncreasingf asserts that the collection is increasing + // +-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") ++// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -741,9 +825,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// a.IsNonDecreasing([]int{1, 1, 2}) +-// a.IsNonDecreasing([]float{1, 2}) +-// 
a.IsNonDecreasing([]string{"a", "b"}) ++// a.IsNonDecreasing([]int{1, 1, 2}) ++// a.IsNonDecreasing([]float{1, 2}) ++// a.IsNonDecreasing([]string{"a", "b"}) + func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -753,9 +837,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -765,9 +849,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in + + // IsNonIncreasing asserts that the collection is not increasing + // +-// a.IsNonIncreasing([]int{2, 1, 1}) +-// a.IsNonIncreasing([]float{2, 1}) +-// a.IsNonIncreasing([]string{"b", "a"}) ++// a.IsNonIncreasing([]int{2, 1, 1}) ++// a.IsNonIncreasing([]float{2, 1}) ++// a.IsNonIncreasing([]string{"b", "a"}) + func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -777,9 +861,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -805,7 +889,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s + + // JSONEq asserts that two JSON strings are equivalent. + // +-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -815,7 +899,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -826,7 +910,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. 
+ // Len asserts that the specified object has specific length. + // Len also fails if the object has a type that len() not accept. + // +-// a.Len(mySlice, 3) ++// a.Len(mySlice, 3) + func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -837,7 +921,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// a.Lenf(mySlice, 3, "error message %s", "formatted") ++// a.Lenf(mySlice, 3, "error message %s", "formatted") + func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -847,9 +931,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in + + // Less asserts that the first element is less than the second + // +-// a.Less(1, 2) +-// a.Less(float64(1), float64(2)) +-// a.Less("a", "b") ++// a.Less(1, 2) ++// a.Less(float64(1), float64(2)) ++// a.Less("a", "b") + func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -859,10 +943,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// a.LessOrEqual(1, 2) +-// a.LessOrEqual(2, 2) +-// a.LessOrEqual("a", "b") +-// a.LessOrEqual("b", "b") ++// a.LessOrEqual(1, 2) ++// a.LessOrEqual(2, 2) ++// a.LessOrEqual("a", "b") ++// a.LessOrEqual("b", "b") + func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -872,10 +956,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// a.LessOrEqualf(1, 2, "error message %s", "formatted") +-// a.LessOrEqualf(2, 2, "error message %s", "formatted") +-// a.LessOrEqualf("a", "b", "error message %s", "formatted") +-// a.LessOrEqualf("b", "b", "error message %s", "formatted") ++// a.LessOrEqualf(1, 2, "error message %s", "formatted") ++// a.LessOrEqualf(2, 2, "error message %s", "formatted") ++// a.LessOrEqualf("a", "b", "error message %s", "formatted") ++// a.LessOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -885,9 +969,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar + + // Lessf asserts that the first element is less than the second + // +-// a.Lessf(1, 2, "error message %s", "formatted") +-// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +-// a.Lessf("a", "b", "error message %s", "formatted") ++// a.Lessf(1, 2, "error message %s", "formatted") ++// a.Lessf(float64(1), float64(2), "error message %s", "formatted") ++// a.Lessf("a", "b", "error message %s", "formatted") + func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -897,8 +981,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i + + // Negative asserts that the specified element is negative + // +-// 
a.Negative(-1) +-// a.Negative(-1.23) ++// a.Negative(-1) ++// a.Negative(-1.23) + func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -908,8 +992,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + + // Negativef asserts that the specified element is negative + // +-// a.Negativef(-1, "error message %s", "formatted") +-// a.Negativef(-1.23, "error message %s", "formatted") ++// a.Negativef(-1, "error message %s", "formatted") ++// a.Negativef(-1.23, "error message %s", "formatted") + func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -920,7 +1004,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) b + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) ++// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -931,7 +1015,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -941,7 +1025,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t + + // Nil asserts that the specified object is nil. + // +-// a.Nil(err) ++// a.Nil(err) + func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -951,7 +1035,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + + // Nilf asserts that the specified object is nil. + // +-// a.Nilf(err, "error message %s", "formatted") ++// a.Nilf(err, "error message %s", "formatted") + func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -979,10 +1063,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoError(err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoError(err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -992,10 +1076,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if a.NoErrorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoErrorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1024,9 +1108,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContains("Hello World", "Earth") +-// a.NotContains(["Hello", "World"], "Earth") +-// a.NotContains({"Hello": "World"}, "Earth") ++// a.NotContains("Hello World", "Earth") ++// a.NotContains(["Hello", "World"], "Earth") ++// a.NotContains({"Hello": "World"}, "Earth") + func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1037,9 +1121,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") ++// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") ++// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") ++// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") + func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1050,9 +1134,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmpty(obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmpty(obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1063,9 +1147,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmptyf(obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmptyf(obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1075,7 +1159,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface + + // NotEqual asserts that the specified values are NOT equal. + // +-// a.NotEqual(obj1, obj2) ++// a.NotEqual(obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). 
+@@ -1088,7 +1172,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValues(obj1, obj2) ++// a.NotEqualValues(obj1, obj2) + func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1098,7 +1182,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") + func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1108,7 +1192,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m + + // NotEqualf asserts that the specified values are NOT equal. + // +-// a.NotEqualf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualf(obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1139,7 +1223,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in + + // NotNil asserts that the specified object is not nil. + // +-// a.NotNil(err) ++// a.NotNil(err) + func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1149,7 +1233,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool + + // NotNilf asserts that the specified object is not nil. + // +-// a.NotNilf(err, "error message %s", "formatted") ++// a.NotNilf(err, "error message %s", "formatted") + func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1159,7 +1243,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanics(func(){ RemainCalm() }) ++// a.NotPanics(func(){ RemainCalm() }) + func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1169,7 +1253,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") ++// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") + func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1179,8 +1263,8 @@ func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{} + + // NotRegexp asserts that a specified regexp does not match a string. 
+ // +-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +-// a.NotRegexp("^start", "it's not starting") ++// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") ++// a.NotRegexp("^start", "it's not starting") + func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1190,8 +1274,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") ++// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") + func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1201,7 +1285,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg + + // NotSame asserts that two pointers do not reference the same object. + // +-// a.NotSame(ptr1, ptr2) ++// a.NotSame(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1214,7 +1298,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg + + // NotSamef asserts that two pointers do not reference the same object. + // +-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") ++// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1228,7 +1312,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1239,7 +1323,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1265,7 +1349,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bo + + // Panics asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panics(func(){ GoCrazy() }) ++// a.Panics(func(){ GoCrazy() }) + func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1277,7 +1361,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithError("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithError("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1289,7 +1373,7 @@ func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndAr + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1300,7 +1384,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg str + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1311,7 +1395,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgA + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1321,7 +1405,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") ++// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1331,8 +1415,8 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b + + // Positive asserts that the specified element is positive + // +-// a.Positive(1) +-// a.Positive(1.23) ++// a.Positive(1) ++// a.Positive(1.23) + func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1342,8 +1426,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + + // Positivef asserts that the specified element is positive + // +-// a.Positivef(1, "error message %s", "formatted") +-// a.Positivef(1.23, "error message %s", "formatted") ++// a.Positivef(1, "error message %s", "formatted") ++// a.Positivef(1.23, "error message %s", "formatted") + func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1353,8 +1437,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) b + + // Regexp asserts that a specified regexp matches a string. + // +-// a.Regexp(regexp.MustCompile("start"), "it's starting") +-// a.Regexp("start...$", "it's not starting") ++// a.Regexp(regexp.MustCompile("start"), "it's starting") ++// a.Regexp("start...$", "it's not starting") + func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1364,8 +1448,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter + + // Regexpf asserts that a specified regexp matches a string. + // +-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") ++// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") + func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1375,7 +1459,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . + + // Same asserts that two pointers reference the same object. + // +-// a.Same(ptr1, ptr2) ++// a.Same(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1388,7 +1472,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . + + // Samef asserts that two pointers reference the same object. + // +-// a.Samef(ptr1, ptr2, "error message %s", "formatted") ++// a.Samef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1402,7 +1486,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1413,7 +1497,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1423,7 +1507,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a + + // True asserts that the specified value is true. + // +-// a.True(myBool) ++// a.True(myBool) + func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1433,7 +1517,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + + // Truef asserts that the specified value is true. + // +-// a.Truef(myBool, "error message %s", "formatted") ++// a.Truef(myBool, "error message %s", "formatted") + func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1443,7 +1527,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) ++// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) + func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1453,7 +1537,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1463,7 +1547,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1473,7 +1557,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim + + // WithinRangef asserts that a time is within a time range (inclusive). 
+ // +-// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go +index 75944878358..00df62a0599 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go +@@ -46,36 +46,36 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT + + // IsIncreasing asserts that the collection is increasing + // +-// assert.IsIncreasing(t, []int{1, 2, 3}) +-// assert.IsIncreasing(t, []float{1, 2}) +-// assert.IsIncreasing(t, []string{"a", "b"}) ++// assert.IsIncreasing(t, []int{1, 2, 3}) ++// assert.IsIncreasing(t, []float{1, 2}) ++// assert.IsIncreasing(t, []string{"a", "b"}) + func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + } + + // IsNonIncreasing asserts that the collection is not increasing + // +-// assert.IsNonIncreasing(t, []int{2, 1, 1}) +-// assert.IsNonIncreasing(t, []float{2, 1}) +-// assert.IsNonIncreasing(t, []string{"b", "a"}) ++// assert.IsNonIncreasing(t, []int{2, 1, 1}) ++// assert.IsNonIncreasing(t, []float{2, 1}) ++// assert.IsNonIncreasing(t, []string{"b", "a"}) + func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + } + + // IsDecreasing asserts that the collection is decreasing + // +-// assert.IsDecreasing(t, []int{2, 1, 0}) +-// assert.IsDecreasing(t, []float{2, 1}) +-// assert.IsDecreasing(t, []string{"b", "a"}) ++// assert.IsDecreasing(t, []int{2, 1, 0}) ++// assert.IsDecreasing(t, []float{2, 1}) ++// assert.IsDecreasing(t, []string{"b", "a"}) + func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + } + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// assert.IsNonDecreasing(t, []int{1, 1, 2}) +-// assert.IsNonDecreasing(t, []float{1, 2}) +-// assert.IsNonDecreasing(t, []string{"a", "b"}) ++// assert.IsNonDecreasing(t, []int{1, 1, 2}) ++// assert.IsNonDecreasing(t, []float{1, 2}) ++// assert.IsNonDecreasing(t, []string{"a", "b"}) + func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
+ } +diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go +index fa1245b1897..a55d1bba926 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertions.go ++++ b/vendor/github.com/stretchr/testify/assert/assertions.go +@@ -8,7 +8,6 @@ import ( + "fmt" + "math" + "os" +- "path/filepath" + "reflect" + "regexp" + "runtime" +@@ -76,6 +75,77 @@ func ObjectsAreEqual(expected, actual interface{}) bool { + return bytes.Equal(exp, act) + } + ++// copyExportedFields iterates downward through nested data structures and creates a copy ++// that only contains the exported struct fields. ++func copyExportedFields(expected interface{}) interface{} { ++ if isNil(expected) { ++ return expected ++ } ++ ++ expectedType := reflect.TypeOf(expected) ++ expectedKind := expectedType.Kind() ++ expectedValue := reflect.ValueOf(expected) ++ ++ switch expectedKind { ++ case reflect.Struct: ++ result := reflect.New(expectedType).Elem() ++ for i := 0; i < expectedType.NumField(); i++ { ++ field := expectedType.Field(i) ++ isExported := field.IsExported() ++ if isExported { ++ fieldValue := expectedValue.Field(i) ++ if isNil(fieldValue) || isNil(fieldValue.Interface()) { ++ continue ++ } ++ newValue := copyExportedFields(fieldValue.Interface()) ++ result.Field(i).Set(reflect.ValueOf(newValue)) ++ } ++ } ++ return result.Interface() ++ ++ case reflect.Ptr: ++ result := reflect.New(expectedType.Elem()) ++ unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface()) ++ result.Elem().Set(reflect.ValueOf(unexportedRemoved)) ++ return result.Interface() ++ ++ case reflect.Array, reflect.Slice: ++ result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) ++ for i := 0; i < expectedValue.Len(); i++ { ++ index := expectedValue.Index(i) ++ if isNil(index) { ++ continue ++ } ++ unexportedRemoved := copyExportedFields(index.Interface()) ++ result.Index(i).Set(reflect.ValueOf(unexportedRemoved)) ++ } ++ return result.Interface() ++ ++ case reflect.Map: ++ result := reflect.MakeMap(expectedType) ++ for _, k := range expectedValue.MapKeys() { ++ index := expectedValue.MapIndex(k) ++ unexportedRemoved := copyExportedFields(index.Interface()) ++ result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved)) ++ } ++ return result.Interface() ++ ++ default: ++ return expected ++ } ++} ++ ++// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are ++// considered equal. This comparison of only exported fields is applied recursively to nested data ++// structures. ++// ++// This function does no assertion of any kind. ++func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { ++ expectedCleaned := copyExportedFields(expected) ++ actualCleaned := copyExportedFields(actual) ++ return ObjectsAreEqualValues(expectedCleaned, actualCleaned) ++} ++ + // ObjectsAreEqualValues gets whether two objects are equal, or if their + // values are equal. 
+ func ObjectsAreEqualValues(expected, actual interface{}) bool { +@@ -141,12 +211,11 @@ func CallerInfo() []string { + } + + parts := strings.Split(file, "/") +- file = parts[len(parts)-1] + if len(parts) > 1 { ++ filename := parts[len(parts)-1] + dir := parts[len(parts)-2] +- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { +- path, _ := filepath.Abs(file) +- callers = append(callers, fmt.Sprintf("%s:%d", path, line)) ++ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { ++ callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + +@@ -273,7 +342,7 @@ type labeledContent struct { + + // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: + // +-// \t{{label}}:{{align_spaces}}\t{{content}}\n ++// \t{{label}}:{{align_spaces}}\t{{content}}\n + // + // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. + // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +@@ -296,7 +365,7 @@ func labeledOutput(content ...labeledContent) string { + + // Implements asserts that an object is implemented by the specified interface. + // +-// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) ++// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) + func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -328,7 +397,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs + + // Equal asserts that two objects are equal. + // +-// assert.Equal(t, 123, 123) ++// assert.Equal(t, 123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -369,7 +438,7 @@ func validateEqualArgs(expected, actual interface{}) error { + + // Same asserts that two pointers reference the same object. + // +-// assert.Same(t, ptr1, ptr2) ++// assert.Same(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -389,7 +458,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b + + // NotSame asserts that two pointers do not reference the same object. + // +-// assert.NotSame(t, ptr1, ptr2) ++// assert.NotSame(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -457,7 +526,7 @@ func truncatingFormat(data interface{}) string { + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValues(t, uint32(123), int32(123)) ++// assert.EqualValues(t, uint32(123), int32(123)) + func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -475,9 +544,53 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa + + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true ++// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false ++func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ ++ aType := reflect.TypeOf(expected) ++ bType := reflect.TypeOf(actual) ++ ++ if aType != bType { ++ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) ++ } ++ ++ if aType.Kind() != reflect.Struct { ++ return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) ++ } ++ ++ if bType.Kind() != reflect.Struct { ++ return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) ++ } ++ ++ expected = copyExportedFields(expected) ++ actual = copyExportedFields(actual) ++ ++ if !ObjectsAreEqualValues(expected, actual) { ++ diff := diff(expected, actual) ++ expected, actual = formatUnequalValues(expected, actual) ++ return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+ ++ "expected: %s\n"+ ++ "actual : %s%s", expected, actual, diff), msgAndArgs...) ++ } ++ ++ return true ++} ++ + // Exactly asserts that two objects are equal in value and type. + // +-// assert.Exactly(t, int32(123), int64(123)) ++// assert.Exactly(t, int32(123), int64(123)) + func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -496,7 +609,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} + + // NotNil asserts that the specified object is not nil. + // +-// assert.NotNil(t, err) ++// assert.NotNil(t, err) + func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true +@@ -530,7 +643,7 @@ func isNil(object interface{}) bool { + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, +- reflect.Ptr, reflect.Slice}, ++ reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, + kind) + + if isNilableKind && value.IsNil() { +@@ -542,7 +655,7 @@ func isNil(object interface{}) bool { + + // Nil asserts that the specified object is nil. + // +-// assert.Nil(t, err) ++// assert.Nil(t, err) + func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true +@@ -585,7 +698,7 @@ func isEmpty(object interface{}) bool { + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Empty(t, obj) ++// assert.Empty(t, obj) + func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + pass := isEmpty(object) + if !pass { +@@ -602,9 +715,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmpty(t, obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmpty(t, obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + pass := !isEmpty(object) + if !pass { +@@ -633,7 +746,7 @@ func getLen(x interface{}) (ok bool, length int) { + // Len asserts that the specified object has specific length. 
+ // Len also fails if the object has a type that len() not accept. + // +-// assert.Len(t, mySlice, 3) ++// assert.Len(t, mySlice, 3) + func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -651,7 +764,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) + + // True asserts that the specified value is true. + // +-// assert.True(t, myBool) ++// assert.True(t, myBool) + func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if !value { + if h, ok := t.(tHelper); ok { +@@ -666,7 +779,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + // False asserts that the specified value is false. + // +-// assert.False(t, myBool) ++// assert.False(t, myBool) + func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if value { + if h, ok := t.(tHelper); ok { +@@ -681,7 +794,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + // NotEqual asserts that the specified values are NOT equal. + // +-// assert.NotEqual(t, obj1, obj2) ++// assert.NotEqual(t, obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -704,7 +817,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValues(t, obj1, obj2) ++// assert.NotEqualValues(t, obj1, obj2) + func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -763,9 +876,9 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Contains(t, "Hello World", "World") +-// assert.Contains(t, ["Hello", "World"], "World") +-// assert.Contains(t, {"Hello": "World"}, "Hello") ++// assert.Contains(t, "Hello World", "World") ++// assert.Contains(t, ["Hello", "World"], "World") ++// assert.Contains(t, {"Hello": "World"}, "Hello") + func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -786,9 +899,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContains(t, "Hello World", "Earth") +-// assert.NotContains(t, ["Hello", "World"], "Earth") +-// assert.NotContains(t, {"Hello": "World"}, "Earth") ++// assert.NotContains(t, "Hello World", "Earth") ++// assert.NotContains(t, ["Hello", "World"], "Earth") ++// assert.NotContains(t, {"Hello": "World"}, "Earth") + func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -796,10 +909,10 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) + + ok, found := containsElement(s, contains) + if !ok { +- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) 
+ } + if found { +- return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...) + } + + return true +@@ -809,7 +922,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -818,49 +931,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok + return true // we consider nil to be equal to the nil set + } + +- defer func() { +- if e := recover(); e != nil { +- ok = false +- } +- }() +- + listKind := reflect.TypeOf(list).Kind() +- subsetKind := reflect.TypeOf(subset).Kind() +- + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + ++ subsetKind := reflect.TypeOf(subset).Kind() + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + +- subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { +- listValue := reflect.ValueOf(list) +- subsetKeys := subsetValue.MapKeys() ++ subsetMap := reflect.ValueOf(subset) ++ actualMap := reflect.ValueOf(list) + +- for i := 0; i < len(subsetKeys); i++ { +- subsetKey := subsetKeys[i] +- subsetElement := subsetValue.MapIndex(subsetKey).Interface() +- listElement := listValue.MapIndex(subsetKey).Interface() ++ for _, k := range subsetMap.MapKeys() { ++ ev := subsetMap.MapIndex(k) ++ av := actualMap.MapIndex(k) + +- if !ObjectsAreEqual(subsetElement, listElement) { +- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) ++ if !av.IsValid() { ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) ++ } ++ if !ObjectsAreEqual(ev.Interface(), av.Interface()) { ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + } + + return true + } + +- for i := 0; i < subsetValue.Len(); i++ { +- element := subsetValue.Index(i).Interface() ++ subsetList := reflect.ValueOf(subset) ++ for i := 0; i < subsetList.Len(); i++ { ++ element := subsetList.Index(i).Interface() + ok, found := containsElement(list, element) + if !ok { +- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) + } + if !found { +- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) + } + } + +@@ -870,7 +978,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). 
+ // +-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -879,34 +987,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) + } + +- defer func() { +- if e := recover(); e != nil { +- ok = false +- } +- }() +- + listKind := reflect.TypeOf(list).Kind() +- subsetKind := reflect.TypeOf(subset).Kind() +- + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + ++ subsetKind := reflect.TypeOf(subset).Kind() + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + +- subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { +- listValue := reflect.ValueOf(list) +- subsetKeys := subsetValue.MapKeys() ++ subsetMap := reflect.ValueOf(subset) ++ actualMap := reflect.ValueOf(list) + +- for i := 0; i < len(subsetKeys); i++ { +- subsetKey := subsetKeys[i] +- subsetElement := subsetValue.MapIndex(subsetKey).Interface() +- listElement := listValue.MapIndex(subsetKey).Interface() ++ for _, k := range subsetMap.MapKeys() { ++ ev := subsetMap.MapIndex(k) ++ av := actualMap.MapIndex(k) + +- if !ObjectsAreEqual(subsetElement, listElement) { ++ if !av.IsValid() { ++ return true ++ } ++ if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return true + } + } +@@ -914,8 +1016,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + +- for i := 0; i < subsetValue.Len(); i++ { +- element := subsetValue.Index(i).Interface() ++ subsetList := reflect.ValueOf(subset) ++ for i := 0; i < subsetList.Len(); i++ { ++ element := subsetList.Index(i).Interface() + ok, found := containsElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) +@@ -1060,7 +1163,7 @@ func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panics(t, func(){ GoCrazy() }) ++// assert.Panics(t, func(){ GoCrazy() }) + func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1076,7 +1179,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. 
+ // +-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1097,7 +1200,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1117,7 +1220,7 @@ func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs . + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanics(t, func(){ RemainCalm() }) ++// assert.NotPanics(t, func(){ RemainCalm() }) + func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1132,7 +1235,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) ++// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) + func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1148,7 +1251,7 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1207,7 +1310,7 @@ func toFloat(x interface{}) (float64, bool) { + + // InDelta asserts that the two numerals are within delta of each other. + // +-// assert.InDelta(t, math.Pi, 22/7.0, 0.01) ++// assert.InDelta(t, math.Pi, 22/7.0, 0.01) + func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1380,10 +1483,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoError(t, err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoError(t, err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + if h, ok := t.(tHelper); ok { +@@ -1397,10 +1500,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + + // Error asserts that a function returned an error (i.e. not `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if assert.Error(t, err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Error(t, err) { ++// assert.Equal(t, expectedError, err) ++// } + func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err == nil { + if h, ok := t.(tHelper); ok { +@@ -1415,8 +1518,8 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualError(t, err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// assert.EqualError(t, err, expectedErrorString) + func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1438,8 +1541,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContains(t, err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// assert.ErrorContains(t, err, expectedErrorSubString) + func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1472,8 +1575,8 @@ func matchRegexp(rx interface{}, str interface{}) bool { + + // Regexp asserts that a specified regexp matches a string. + // +-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +-// assert.Regexp(t, "start...$", "it's not starting") ++// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") ++// assert.Regexp(t, "start...$", "it's not starting") + func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1490,8 +1593,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +-// assert.NotRegexp(t, "^start", "it's not starting") ++// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") ++// assert.NotRegexp(t, "^start", "it's not starting") + func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1603,7 +1706,7 @@ func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + + // JSONEq asserts that two JSON strings are equivalent. + // +-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1726,7 +1829,7 @@ type tHelper interface { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. 
+ // +-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) ++// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) + func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1756,10 +1859,93 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t + } + } + ++// CollectT implements the TestingT interface and collects all errors. ++type CollectT struct { ++ errors []error ++} ++ ++// Errorf collects the error. ++func (c *CollectT) Errorf(format string, args ...interface{}) { ++ c.errors = append(c.errors, fmt.Errorf(format, args...)) ++} ++ ++// FailNow panics. ++func (c *CollectT) FailNow() { ++ panic("Assertion failed") ++} ++ ++// Reset clears the collected errors. ++func (c *CollectT) Reset() { ++ c.errors = nil ++} ++ ++// Copy copies the collected errors to the supplied t. ++func (c *CollectT) Copy(t TestingT) { ++ if tt, ok := t.(tHelper); ok { ++ tt.Helper() ++ } ++ for _, err := range c.errors { ++ t.Errorf("%v", err) ++ } ++} ++ ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithT(t, func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ ++ collect := new(CollectT) ++ ch := make(chan bool, 1) ++ ++ timer := time.NewTimer(waitFor) ++ defer timer.Stop() ++ ++ ticker := time.NewTicker(tick) ++ defer ticker.Stop() ++ ++ for tick := ticker.C; ; { ++ select { ++ case <-timer.C: ++ collect.Copy(t) ++ return Fail(t, "Condition never satisfied", msgAndArgs...) ++ case <-tick: ++ tick = nil ++ collect.Reset() ++ go func() { ++ condition(collect) ++ ch <- len(collect.errors) == 0 ++ }() ++ case v := <-ch: ++ if v { ++ return true ++ } ++ tick = ticker.C ++ } ++ } ++} ++ + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. 
+ // +-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) ++// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) + func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go +index c9dccc4d6cd..4953981d387 100644 +--- a/vendor/github.com/stretchr/testify/assert/doc.go ++++ b/vendor/github.com/stretchr/testify/assert/doc.go +@@ -1,39 +1,40 @@ + // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. + // +-// Example Usage ++// # Example Usage + // + // The following is a complete example using assert in a standard test function: +-// import ( +-// "testing" +-// "github.com/stretchr/testify/assert" +-// ) + // +-// func TestSomething(t *testing.T) { ++// import ( ++// "testing" ++// "github.com/stretchr/testify/assert" ++// ) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// func TestSomething(t *testing.T) { + // +-// assert.Equal(t, a, b, "The two words should be the same.") ++// var a string = "Hello" ++// var b string = "Hello" + // +-// } ++// assert.Equal(t, a, b, "The two words should be the same.") ++// ++// } + // + // if you assert many times, use the format below: + // +-// import ( +-// "testing" +-// "github.com/stretchr/testify/assert" +-// ) ++// import ( ++// "testing" ++// "github.com/stretchr/testify/assert" ++// ) + // +-// func TestSomething(t *testing.T) { +-// assert := assert.New(t) ++// func TestSomething(t *testing.T) { ++// assert := assert.New(t) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// var a string = "Hello" ++// var b string = "Hello" + // +-// assert.Equal(a, b, "The two words should be the same.") +-// } ++// assert.Equal(a, b, "The two words should be the same.") ++// } + // +-// Assertions ++// # Assertions + // + // Assertions allow you to easily write test code, and are global funcs in the `assert` package. + // All assertion functions take, as the first argument, the `*testing.T` object provided by the +diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go +index 4ed341dd289..d8038c28a75 100644 +--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go ++++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go +@@ -23,7 +23,7 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) ( + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) ++// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -45,7 +45,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value + + // HTTPRedirect asserts that a specified handler returns a redirect status code. 
+ // +-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -67,7 +67,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu + + // HTTPError asserts that a specified handler returns an error status code. + // +-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -89,7 +89,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) ++// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { +@@ -124,7 +124,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -144,7 +144,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go +index 7324128ef19..d6b3c844cc8 100644 +--- a/vendor/github.com/stretchr/testify/mock/doc.go ++++ b/vendor/github.com/stretchr/testify/mock/doc.go +@@ -1,17 +1,17 @@ + // Package mock provides a system by which it is possible to mock your objects + // and verify calls are happening as expected. + // +-// Example Usage ++// # Example Usage + // + // The mock package provides an object, Mock, that tracks activity on another object. 
It is usually + // embedded into a test object as shown below: + // +-// type MyTestObject struct { +-// // add a Mock object instance +-// mock.Mock ++// type MyTestObject struct { ++// // add a Mock object instance ++// mock.Mock + // +-// // other fields go here as normal +-// } ++// // other fields go here as normal ++// } + // + // When implementing the methods of an interface, you wire your functions up + // to call the Mock.Called(args...) method, and return the appropriate values. +@@ -19,25 +19,25 @@ + // For example, to mock a method that saves the name and age of a person and returns + // the year of their birth or an error, you might write this: + // +-// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +-// args := o.Called(firstname, lastname, age) +-// return args.Int(0), args.Error(1) +-// } ++// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { ++// args := o.Called(firstname, lastname, age) ++// return args.Int(0), args.Error(1) ++// } + // + // The Int, Error and Bool methods are examples of strongly typed getters that take the argument + // index position. Given this argument list: + // +-// (12, true, "Something") ++// (12, true, "Something") + // + // You could read them out strongly typed like this: + // +-// args.Int(0) +-// args.Bool(1) +-// args.String(2) ++// args.Int(0) ++// args.Bool(1) ++// args.String(2) + // + // For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: + // +-// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) ++// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) + // + // This may cause a panic if the object you are getting is nil (the type assertion will fail), in those + // cases you should check for nil first. +diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go +index f0af8246cfc..f4b42e44ffe 100644 +--- a/vendor/github.com/stretchr/testify/mock/mock.go ++++ b/vendor/github.com/stretchr/testify/mock/mock.go +@@ -3,6 +3,7 @@ package mock + import ( + "errors" + "fmt" ++ "path" + "reflect" + "regexp" + "runtime" +@@ -13,6 +14,7 @@ import ( + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" ++ + "github.com/stretchr/testify/assert" + ) + +@@ -99,7 +101,7 @@ func (c *Call) unlock() { + + // Return specifies the return arguments for the expectation. + // +-// Mock.On("DoSomething").Return(errors.New("failed")) ++// Mock.On("DoSomething").Return(errors.New("failed")) + func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() +@@ -111,7 +113,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { + + // Panic specifies if the functon call should fail and the panic message + // +-// Mock.On("DoSomething").Panic("test panic") ++// Mock.On("DoSomething").Panic("test panic") + func (c *Call) Panic(msg string) *Call { + c.lock() + defer c.unlock() +@@ -123,14 +125,14 @@ func (c *Call) Panic(msg string) *Call { + + // Once indicates that that the mock should only return the value once. + // +-// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() ++// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() + func (c *Call) Once() *Call { + return c.Times(1) + } + + // Twice indicates that that the mock should only return the value twice. 
+ // +-// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() ++// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() + func (c *Call) Twice() *Call { + return c.Times(2) + } +@@ -138,7 +140,7 @@ func (c *Call) Twice() *Call { + // Times indicates that that the mock should only return the indicated number + // of times. + // +-// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) ++// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) + func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() +@@ -149,7 +151,7 @@ func (c *Call) Times(i int) *Call { + // WaitUntil sets the channel that will block the mock's return until its closed + // or a message is received. + // +-// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) ++// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) + func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() +@@ -159,7 +161,7 @@ func (c *Call) WaitUntil(w <-chan time.Time) *Call { + + // After sets how long to block until the call returns + // +-// Mock.On("MyMethod", arg1, arg2).After(time.Second) ++// Mock.On("MyMethod", arg1, arg2).After(time.Second) + func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() +@@ -171,10 +173,10 @@ func (c *Call) After(d time.Duration) *Call { + // mocking a method (such as an unmarshaler) that takes a pointer to a struct and + // sets properties in such struct + // +-// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { +-// arg := args.Get(0).(*map[string]interface{}) +-// arg["foo"] = "bar" +-// }) ++// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { ++// arg := args.Get(0).(*map[string]interface{}) ++// arg["foo"] = "bar" ++// }) + func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() +@@ -194,16 +196,18 @@ func (c *Call) Maybe() *Call { + // On chains a new expectation description onto the mocked interface. This + // allows syntax like. + // +-// Mock. +-// On("MyMethod", 1).Return(nil). +-// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) ++// Mock. ++// On("MyMethod", 1).Return(nil). ++// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) ++// + //go:noinline + func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) + } + + // Unset removes a mock handler from being called. +-// test.On("func", mock.Anything).Unset() ++// ++// test.On("func", mock.Anything).Unset() + func (c *Call) Unset() *Call { + var unlockOnce sync.Once + +@@ -218,16 +222,22 @@ func (c *Call) Unset() *Call { + + foundMatchingCall := false + +- for i, call := range c.Parent.ExpectedCalls { ++ // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones ++ var index int // write index ++ for _, call := range c.Parent.ExpectedCalls { + if call.Method == c.Method { + _, diffCount := call.Arguments.Diff(c.Arguments) + if diffCount == 0 { + foundMatchingCall = true +- // Remove from ExpectedCalls +- c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) 
++ // Remove from ExpectedCalls - just skip it ++ continue + } + } ++ c.Parent.ExpectedCalls[index] = call ++ index++ + } ++ // trim slice up to last copied index ++ c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] + + if !foundMatchingCall { + unlockOnce.Do(c.unlock) +@@ -243,9 +253,9 @@ func (c *Call) Unset() *Call { + // calls have been called as expected. The referenced calls may be from the + // same mock instance and/or other mock instances. + // +-// Mock.On("Do").Return(nil).Notbefore( +-// Mock.On("Init").Return(nil) +-// ) ++// Mock.On("Do").Return(nil).Notbefore( ++// Mock.On("Init").Return(nil) ++// ) + func (c *Call) NotBefore(calls ...*Call) *Call { + c.lock() + defer c.unlock() +@@ -328,7 +338,7 @@ func (m *Mock) fail(format string, args ...interface{}) { + // On starts a description of an expectation of the specified method + // being called. + // +-// Mock.On("MyMethod", arg1, arg2) ++// Mock.On("MyMethod", arg1, arg2) + func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { +@@ -418,6 +428,10 @@ func callString(method string, arguments Arguments, includeArgumentValues bool) + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { ++ if _, ok := arg.(*FunctionalOptionsArgument); ok { ++ argVals = append(argVals, fmt.Sprintf("%d: %s", argIndex, arg)) ++ continue ++ } + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) +@@ -752,6 +766,7 @@ type AnythingOfTypeArgument string + // name of the type to check for. Used in Diff and Assert. + // + // For example: ++// + // Assert(t, AnythingOfType("string"), AnythingOfType("int")) + func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +@@ -774,6 +789,34 @@ func IsType(t interface{}) *IsTypeArgument { + return &IsTypeArgument{t: t} + } + ++// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument ++// for use when type checking. ++type FunctionalOptionsArgument struct { ++ value interface{} ++} ++ ++// String returns the string representation of FunctionalOptionsArgument ++func (f *FunctionalOptionsArgument) String() string { ++ var name string ++ tValue := reflect.ValueOf(f.value) ++ if tValue.Len() > 0 { ++ name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() ++ } ++ ++ return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) ++} ++ ++// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type ++// and the values to check of ++// ++// For example: ++// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) ++func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { ++ return &FunctionalOptionsArgument{ ++ value: value, ++ } ++} ++ + // argumentMatcher performs custom argument matching, returning whether or + // not the argument is matched by the expectation fixture function. 
+ type argumentMatcher struct { +@@ -920,6 +963,29 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) + } ++ } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { ++ t := expected.(*FunctionalOptionsArgument).value ++ ++ var name string ++ tValue := reflect.ValueOf(t) ++ if tValue.Len() > 0 { ++ name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() ++ } ++ ++ tName := reflect.TypeOf(t).Name() ++ if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { ++ differences++ ++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) ++ } else { ++ if ef, af := assertOpts(t, actual); ef == "" && af == "" { ++ // match ++ output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) ++ } else { ++ // not match ++ differences++ ++ output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) ++ } ++ } + } else { + // normal checking + +@@ -1096,3 +1162,65 @@ var spewConfig = spew.ConfigState{ + type tHelper interface { + Helper() + } ++ ++func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { ++ expectedOpts := reflect.ValueOf(expected) ++ actualOpts := reflect.ValueOf(actual) ++ var expectedNames []string ++ for i := 0; i < expectedOpts.Len(); i++ { ++ expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) ++ } ++ var actualNames []string ++ for i := 0; i < actualOpts.Len(); i++ { ++ actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) ++ } ++ if !assert.ObjectsAreEqual(expectedNames, actualNames) { ++ expectedFmt = fmt.Sprintf("%v", expectedNames) ++ actualFmt = fmt.Sprintf("%v", actualNames) ++ return ++ } ++ ++ for i := 0; i < expectedOpts.Len(); i++ { ++ expectedOpt := expectedOpts.Index(i).Interface() ++ actualOpt := actualOpts.Index(i).Interface() ++ ++ expectedFunc := expectedNames[i] ++ actualFunc := actualNames[i] ++ if expectedFunc != actualFunc { ++ expectedFmt = expectedFunc ++ actualFmt = actualFunc ++ return ++ } ++ ++ ot := reflect.TypeOf(expectedOpt) ++ var expectedValues []reflect.Value ++ var actualValues []reflect.Value ++ if ot.NumIn() == 0 { ++ return ++ } ++ ++ for i := 0; i < ot.NumIn(); i++ { ++ vt := ot.In(i).Elem() ++ expectedValues = append(expectedValues, reflect.New(vt)) ++ actualValues = append(actualValues, reflect.New(vt)) ++ } ++ ++ reflect.ValueOf(expectedOpt).Call(expectedValues) ++ reflect.ValueOf(actualOpt).Call(actualValues) ++ ++ for i := 0; i < ot.NumIn(); i++ { ++ if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { ++ expectedFmt = fmt.Sprintf("%s %+v", expectedNames[i], expectedValues[i].Interface()) ++ actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) ++ return ++ } ++ } ++ } ++ ++ return "", "" ++} ++ ++func funcName(opt interface{}) string { ++ n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() ++ return strings.TrimSuffix(path.Base(n), path.Ext(n)) ++} +diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go +index 169de39221c..96843472455 100644 +--- a/vendor/github.com/stretchr/testify/require/doc.go ++++ b/vendor/github.com/stretchr/testify/require/doc.go +@@ -1,24 +1,25 @@ + // Package require implements the same 
assertions as the `assert` package but + // stops test execution when a test fails. + // +-// Example Usage ++// # Example Usage + // + // The following is a complete example using require in a standard test function: +-// import ( +-// "testing" +-// "github.com/stretchr/testify/require" +-// ) + // +-// func TestSomething(t *testing.T) { ++// import ( ++// "testing" ++// "github.com/stretchr/testify/require" ++// ) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// func TestSomething(t *testing.T) { + // +-// require.Equal(t, a, b, "The two words should be the same.") ++// var a string = "Hello" ++// var b string = "Hello" + // +-// } ++// require.Equal(t, a, b, "The two words should be the same.") + // +-// Assertions ++// } ++// ++// # Assertions + // + // The `require` package have same global functions as in the `assert` package, + // but instead of returning a boolean result they call `t.FailNow()`. +diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go +index 880853f5a2c..63f85214767 100644 +--- a/vendor/github.com/stretchr/testify/require/require.go ++++ b/vendor/github.com/stretchr/testify/require/require.go +@@ -37,9 +37,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Contains(t, "Hello World", "World") +-// assert.Contains(t, ["Hello", "World"], "World") +-// assert.Contains(t, {"Hello": "World"}, "Hello") ++// assert.Contains(t, "Hello World", "World") ++// assert.Contains(t, ["Hello", "World"], "World") ++// assert.Contains(t, {"Hello": "World"}, "Hello") + func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -53,9 +53,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") ++// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") ++// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") ++// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") + func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -123,7 +123,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Empty(t, obj) ++// assert.Empty(t, obj) + func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -137,7 +137,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// assert.Emptyf(t, obj, "error message %s", "formatted") ++// assert.Emptyf(t, obj, "error message %s", "formatted") + func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -150,7 +150,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + + // Equal asserts that two objects are equal. + // +-// assert.Equal(t, 123, 123) ++// assert.Equal(t, 123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -168,8 +168,8 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualError(t, err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// assert.EqualError(t, err, expectedErrorString) + func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -183,8 +183,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") + func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -195,10 +195,50 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args + t.FailNow() + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true ++// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false ++func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EqualExportedValues(t, expected, actual, msgAndArgs...) { ++ return ++ } ++ t.FailNow() ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EqualExportedValuesf(t, expected, actual, msg, args...) { ++ return ++ } ++ t.FailNow() ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. 
+ // +-// assert.EqualValues(t, uint32(123), int32(123)) ++// assert.EqualValues(t, uint32(123), int32(123)) + func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -212,7 +252,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") ++// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") + func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -225,7 +265,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri + + // Equalf asserts that two objects are equal. + // +-// assert.Equalf(t, 123, 123, "error message %s", "formatted") ++// assert.Equalf(t, 123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -242,10 +282,10 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.Error(t, err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Error(t, err) { ++// assert.Equal(t, expectedError, err) ++// } + func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -283,8 +323,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContains(t, err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// assert.ErrorContains(t, err, expectedErrorSubString) + func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -298,8 +338,8 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") + func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -336,10 +376,10 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface + + // Errorf asserts that a function returned an error (i.e. not `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if assert.Errorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Errorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -353,7 +393,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) ++// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) + func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -364,10 +404,66 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t + t.FailNow() + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithT(t, func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EventuallyWithT(t, condition, waitFor, tick, msgAndArgs...) { ++ return ++ } ++ t.FailNow() ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. 
++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EventuallyWithTf(t, condition, waitFor, tick, msg, args...) { ++ return ++ } ++ t.FailNow() ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -380,7 +476,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick + + // Exactly asserts that two objects are equal in value and type. + // +-// assert.Exactly(t, int32(123), int64(123)) ++// assert.Exactly(t, int32(123), int64(123)) + func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -393,7 +489,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. + + // Exactlyf asserts that two objects are equal in value and type. + // +-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") ++// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") + func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -450,7 +546,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { + + // False asserts that the specified value is false. + // +-// assert.False(t, myBool) ++// assert.False(t, myBool) + func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -463,7 +559,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { + + // Falsef asserts that the specified value is false. 
+ // +-// assert.Falsef(t, myBool, "error message %s", "formatted") ++// assert.Falsef(t, myBool, "error message %s", "formatted") + func Falsef(t TestingT, value bool, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -500,9 +596,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + + // Greater asserts that the first element is greater than the second + // +-// assert.Greater(t, 2, 1) +-// assert.Greater(t, float64(2), float64(1)) +-// assert.Greater(t, "b", "a") ++// assert.Greater(t, 2, 1) ++// assert.Greater(t, float64(2), float64(1)) ++// assert.Greater(t, "b", "a") + func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -515,10 +611,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqual(t, 2, 1) +-// assert.GreaterOrEqual(t, 2, 2) +-// assert.GreaterOrEqual(t, "b", "a") +-// assert.GreaterOrEqual(t, "b", "b") ++// assert.GreaterOrEqual(t, 2, 1) ++// assert.GreaterOrEqual(t, 2, 2) ++// assert.GreaterOrEqual(t, "b", "a") ++// assert.GreaterOrEqual(t, "b", "b") + func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -531,10 +627,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") + func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -547,9 +643,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg + + // Greaterf asserts that the first element is greater than the second + // +-// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +-// assert.Greaterf(t, "b", "a", "error message %s", "formatted") ++// assert.Greaterf(t, 2, 1, "error message %s", "formatted") ++// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") ++// assert.Greaterf(t, "b", "a", "error message %s", "formatted") + func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -563,7 +659,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. 
+ // +-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -579,7 +675,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -595,7 +691,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -611,7 +707,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -626,7 +722,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u + + // HTTPError asserts that a specified handler returns an error status code. + // +-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -641,7 +737,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). 
+ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -656,7 +752,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPRedirect asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -671,7 +767,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -686,7 +782,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) ++// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { +@@ -701,7 +797,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { +@@ -716,7 +812,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) ++// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -731,7 +827,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string + + // HTTPSuccessf asserts that a specified handler returns a success status code. 
+ // +-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -746,7 +842,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin + + // Implements asserts that an object is implemented by the specified interface. + // +-// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) ++// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) + func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -759,7 +855,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -772,7 +868,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms + + // InDelta asserts that the two numerals are within delta of each other. + // +-// assert.InDelta(t, math.Pi, 22/7.0, 0.01) ++// assert.InDelta(t, math.Pi, 22/7.0, 0.01) + func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -829,7 +925,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f + + // InDeltaf asserts that the two numerals are within delta of each other. 
+ // +-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -886,9 +982,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl + + // IsDecreasing asserts that the collection is decreasing + // +-// assert.IsDecreasing(t, []int{2, 1, 0}) +-// assert.IsDecreasing(t, []float{2, 1}) +-// assert.IsDecreasing(t, []string{"b", "a"}) ++// assert.IsDecreasing(t, []int{2, 1, 0}) ++// assert.IsDecreasing(t, []float{2, 1}) ++// assert.IsDecreasing(t, []string{"b", "a"}) + func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -901,9 +997,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // IsDecreasingf asserts that the collection is decreasing + // +-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -916,9 +1012,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsIncreasing asserts that the collection is increasing + // +-// assert.IsIncreasing(t, []int{1, 2, 3}) +-// assert.IsIncreasing(t, []float{1, 2}) +-// assert.IsIncreasing(t, []string{"a", "b"}) ++// assert.IsIncreasing(t, []int{1, 2, 3}) ++// assert.IsIncreasing(t, []float{1, 2}) ++// assert.IsIncreasing(t, []string{"a", "b"}) + func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -931,9 +1027,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // IsIncreasingf asserts that the collection is increasing + // +-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -946,9 +1042,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// assert.IsNonDecreasing(t, []int{1, 1, 2}) +-// assert.IsNonDecreasing(t, []float{1, 2}) +-// assert.IsNonDecreasing(t, []string{"a", "b"}) ++// assert.IsNonDecreasing(t, []int{1, 1, 2}) ++// assert.IsNonDecreasing(t, []float{1, 2}) ++// assert.IsNonDecreasing(t, []string{"a", "b"}) + func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs 
...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -961,9 +1057,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -976,9 +1072,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf + + // IsNonIncreasing asserts that the collection is not increasing + // +-// assert.IsNonIncreasing(t, []int{2, 1, 1}) +-// assert.IsNonIncreasing(t, []float{2, 1}) +-// assert.IsNonIncreasing(t, []string{"b", "a"}) ++// assert.IsNonIncreasing(t, []int{2, 1, 1}) ++// assert.IsNonIncreasing(t, []float{2, 1}) ++// assert.IsNonIncreasing(t, []string{"b", "a"}) + func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -991,9 +1087,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1028,7 +1124,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin + + // JSONEq asserts that two JSON strings are equivalent. + // +-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1041,7 +1137,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1055,7 +1151,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int + // Len asserts that the specified object has specific length. 
+ // Len also fails if the object has a type that len() not accept. + // +-// assert.Len(t, mySlice, 3) ++// assert.Len(t, mySlice, 3) + func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1069,7 +1165,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") ++// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") + func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1082,9 +1178,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf + + // Less asserts that the first element is less than the second + // +-// assert.Less(t, 1, 2) +-// assert.Less(t, float64(1), float64(2)) +-// assert.Less(t, "a", "b") ++// assert.Less(t, 1, 2) ++// assert.Less(t, float64(1), float64(2)) ++// assert.Less(t, "a", "b") + func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1097,10 +1193,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqual(t, 1, 2) +-// assert.LessOrEqual(t, 2, 2) +-// assert.LessOrEqual(t, "a", "b") +-// assert.LessOrEqual(t, "b", "b") ++// assert.LessOrEqual(t, 1, 2) ++// assert.LessOrEqual(t, 2, 2) ++// assert.LessOrEqual(t, "a", "b") ++// assert.LessOrEqual(t, "b", "b") + func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1113,10 +1209,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") + func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1129,9 +1225,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
+ + // Lessf asserts that the first element is less than the second + // +-// assert.Lessf(t, 1, 2, "error message %s", "formatted") +-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +-// assert.Lessf(t, "a", "b", "error message %s", "formatted") ++// assert.Lessf(t, 1, 2, "error message %s", "formatted") ++// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") ++// assert.Lessf(t, "a", "b", "error message %s", "formatted") + func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1144,8 +1240,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter + + // Negative asserts that the specified element is negative + // +-// assert.Negative(t, -1) +-// assert.Negative(t, -1.23) ++// assert.Negative(t, -1) ++// assert.Negative(t, -1.23) + func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1158,8 +1254,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + + // Negativef asserts that the specified element is negative + // +-// assert.Negativef(t, -1, "error message %s", "formatted") +-// assert.Negativef(t, -1.23, "error message %s", "formatted") ++// assert.Negativef(t, -1, "error message %s", "formatted") ++// assert.Negativef(t, -1.23, "error message %s", "formatted") + func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1173,7 +1269,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) ++// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) + func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1187,7 +1283,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1200,7 +1296,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. + + // Nil asserts that the specified object is nil. + // +-// assert.Nil(t, err) ++// assert.Nil(t, err) + func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1213,7 +1309,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // Nilf asserts that the specified object is nil. 
+ // +-// assert.Nilf(t, err, "error message %s", "formatted") ++// assert.Nilf(t, err, "error message %s", "formatted") + func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1250,10 +1346,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoError(t, err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoError(t, err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1266,10 +1362,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoErrorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoErrorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1307,9 +1403,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContains(t, "Hello World", "Earth") +-// assert.NotContains(t, ["Hello", "World"], "Earth") +-// assert.NotContains(t, {"Hello": "World"}, "Earth") ++// assert.NotContains(t, "Hello World", "Earth") ++// assert.NotContains(t, ["Hello", "World"], "Earth") ++// assert.NotContains(t, {"Hello": "World"}, "Earth") + func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1323,9 +1419,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") + func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1339,9 +1435,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// if assert.NotEmpty(t, obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmpty(t, obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1355,9 +1451,9 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1370,7 +1466,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) + + // NotEqual asserts that the specified values are NOT equal. + // +-// assert.NotEqual(t, obj1, obj2) ++// assert.NotEqual(t, obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1386,7 +1482,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValues(t, obj1, obj2) ++// assert.NotEqualValues(t, obj1, obj2) + func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1399,7 +1495,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") + func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1412,7 +1508,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s + + // NotEqualf asserts that the specified values are NOT equal. + // +-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1452,7 +1548,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf + + // NotNil asserts that the specified object is not nil. + // +-// assert.NotNil(t, err) ++// assert.NotNil(t, err) + func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1465,7 +1561,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // NotNilf asserts that the specified object is not nil. 
+ // +-// assert.NotNilf(t, err, "error message %s", "formatted") ++// assert.NotNilf(t, err, "error message %s", "formatted") + func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1478,7 +1574,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanics(t, func(){ RemainCalm() }) ++// assert.NotPanics(t, func(){ RemainCalm() }) + func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1491,7 +1587,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") ++// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") + func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1504,8 +1600,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +-// assert.NotRegexp(t, "^start", "it's not starting") ++// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") ++// assert.NotRegexp(t, "^start", "it's not starting") + func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1518,8 +1614,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") + func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1532,7 +1628,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. + + // NotSame asserts that two pointers do not reference the same object. + // +-// assert.NotSame(t, ptr1, ptr2) ++// assert.NotSame(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1548,7 +1644,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. + + // NotSamef asserts that two pointers do not reference the same object. + // +-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1565,7 +1661,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, + // NotSubset asserts that the specified list(array, slice...) 
contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1579,7 +1675,7 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1614,7 +1710,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panics(t, func(){ GoCrazy() }) ++// assert.Panics(t, func(){ GoCrazy() }) + func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1629,7 +1725,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1644,7 +1740,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1658,7 +1754,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1672,7 +1768,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. 
+ // +-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1685,7 +1781,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") + func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1698,8 +1794,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} + + // Positive asserts that the specified element is positive + // +-// assert.Positive(t, 1) +-// assert.Positive(t, 1.23) ++// assert.Positive(t, 1) ++// assert.Positive(t, 1.23) + func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1712,8 +1808,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + + // Positivef asserts that the specified element is positive + // +-// assert.Positivef(t, 1, "error message %s", "formatted") +-// assert.Positivef(t, 1.23, "error message %s", "formatted") ++// assert.Positivef(t, 1, "error message %s", "formatted") ++// assert.Positivef(t, 1.23, "error message %s", "formatted") + func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1726,8 +1822,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + + // Regexp asserts that a specified regexp matches a string. + // +-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +-// assert.Regexp(t, "start...$", "it's not starting") ++// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") ++// assert.Regexp(t, "start...$", "it's not starting") + func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1740,8 +1836,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface + + // Regexpf asserts that a specified regexp matches a string. + // +-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") ++// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") + func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1754,7 +1850,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in + + // Same asserts that two pointers reference the same object. + // +-// assert.Same(t, ptr1, ptr2) ++// assert.Same(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. 
+@@ -1770,7 +1866,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in + + // Samef asserts that two pointers reference the same object. + // +-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1787,7 +1883,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1801,7 +1897,7 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1814,7 +1910,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args + + // True asserts that the specified value is true. + // +-// assert.True(t, myBool) ++// assert.True(t, myBool) + func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1827,7 +1923,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { + + // Truef asserts that the specified value is true. + // +-// assert.Truef(t, myBool, "error message %s", "formatted") ++// assert.Truef(t, myBool, "error message %s", "formatted") + func Truef(t TestingT, value bool, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1840,7 +1936,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) ++// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) + func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1853,7 +1949,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time + + // WithinDurationf asserts that the two times are within duration delta of each other. 
+ // +-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1866,7 +1962,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1879,7 +1975,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m + + // WithinRangef asserts that a time is within a time range (inclusive). + // +-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go +index 960bf6f2cab..3b5b09330a4 100644 +--- a/vendor/github.com/stretchr/testify/require/require_forward.go ++++ b/vendor/github.com/stretchr/testify/require/require_forward.go +@@ -31,9 +31,9 @@ func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...inte + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Contains("Hello World", "World") +-// a.Contains(["Hello", "World"], "World") +-// a.Contains({"Hello": "World"}, "Hello") ++// a.Contains("Hello World", "World") ++// a.Contains(["Hello", "World"], "World") ++// a.Contains({"Hello": "World"}, "Hello") + func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -44,9 +44,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Containsf("Hello World", "World", "error message %s", "formatted") +-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") ++// a.Containsf("Hello World", "World", "error message %s", "formatted") ++// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") ++// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") + func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -99,7 +99,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st + // Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Empty(obj) ++// a.Empty(obj) + func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -110,7 +110,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Emptyf(obj, "error message %s", "formatted") ++// a.Emptyf(obj, "error message %s", "formatted") + func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -120,7 +120,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) + + // Equal asserts that two objects are equal. + // +-// a.Equal(123, 123) ++// a.Equal(123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -135,8 +135,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualError(err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// a.EqualError(err, expectedErrorString) + func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -147,8 +147,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") + func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -156,10 +156,44 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a + EqualErrorf(a.t, theError, errString, msg, args...) + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true ++// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false ++func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EqualExportedValues(a.t, expected, actual, msgAndArgs...) ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EqualExportedValuesf(a.t, expected, actual, msg, args...) ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValues(uint32(123), int32(123)) ++// a.EqualValues(uint32(123), int32(123)) + func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -170,7 +204,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") ++// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") + func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -180,7 +214,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg + + // Equalf asserts that two objects are equal. + // +-// a.Equalf(123, 123, "error message %s", "formatted") ++// a.Equalf(123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -194,10 +228,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Error(err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Error(err) { ++// assert.Equal(t, expectedError, err) ++// } + func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -226,8 +260,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// a.ErrorContains(err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// a.ErrorContains(err, expectedErrorSubString) + func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -238,8 +272,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. 
+ // +-// actualObj, err := SomeFunction() +-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") + func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -267,10 +301,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Errorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Errorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -281,7 +315,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) ++// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -289,10 +323,60 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithT(func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). 
++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -302,7 +386,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t + + // Exactly asserts that two objects are equal in value and type. + // +-// a.Exactly(int32(123), int64(123)) ++// a.Exactly(int32(123), int64(123)) + func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -312,7 +396,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg + + // Exactlyf asserts that two objects are equal in value and type. + // +-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") ++// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") + func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -354,7 +438,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ + + // False asserts that the specified value is false. + // +-// a.False(myBool) ++// a.False(myBool) + func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -364,7 +448,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + + // Falsef asserts that the specified value is false. 
+ // +-// a.Falsef(myBool, "error message %s", "formatted") ++// a.Falsef(myBool, "error message %s", "formatted") + func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -392,9 +476,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + + // Greater asserts that the first element is greater than the second + // +-// a.Greater(2, 1) +-// a.Greater(float64(2), float64(1)) +-// a.Greater("b", "a") ++// a.Greater(2, 1) ++// a.Greater(float64(2), float64(1)) ++// a.Greater("b", "a") + func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -404,10 +488,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqual(2, 1) +-// a.GreaterOrEqual(2, 2) +-// a.GreaterOrEqual("b", "a") +-// a.GreaterOrEqual("b", "b") ++// a.GreaterOrEqual(2, 1) ++// a.GreaterOrEqual(2, 2) ++// a.GreaterOrEqual("b", "a") ++// a.GreaterOrEqual("b", "b") + func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -417,10 +501,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -430,9 +514,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, + + // Greaterf asserts that the first element is greater than the second + // +-// a.Greaterf(2, 1, "error message %s", "formatted") +-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +-// a.Greaterf("b", "a", "error message %s", "formatted") ++// a.Greaterf(2, 1, "error message %s", "formatted") ++// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") ++// a.Greaterf("b", "a", "error message %s", "formatted") + func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -443,7 +527,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). 
+ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -456,7 +540,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -469,7 +553,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -482,7 +566,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -494,7 +578,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin + + // HTTPError asserts that a specified handler returns an error status code. + // +-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -506,7 +590,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -518,7 +602,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str + + // HTTPRedirect asserts that a specified handler returns a redirect status code. 
+ // +-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -530,7 +614,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -542,7 +626,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) ++// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { +@@ -554,7 +638,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { +@@ -566,7 +650,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) ++// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -578,7 +662,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -590,7 +674,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s + + // Implements asserts that an object is implemented by the specified interface. 
+ // +-// a.Implements((*MyInterface)(nil), new(MyObject)) ++// a.Implements((*MyInterface)(nil), new(MyObject)) + func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -600,7 +684,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -610,7 +694,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} + + // InDelta asserts that the two numerals are within delta of each other. + // +-// a.InDelta(math.Pi, 22/7.0, 0.01) ++// a.InDelta(math.Pi, 22/7.0, 0.01) + func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -652,7 +736,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del + + // InDeltaf asserts that the two numerals are within delta of each other. + // +-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -694,9 +778,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo + + // IsDecreasing asserts that the collection is decreasing + // +-// a.IsDecreasing([]int{2, 1, 0}) +-// a.IsDecreasing([]float{2, 1}) +-// a.IsDecreasing([]string{"b", "a"}) ++// a.IsDecreasing([]int{2, 1, 0}) ++// a.IsDecreasing([]float{2, 1}) ++// a.IsDecreasing([]string{"b", "a"}) + func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -706,9 +790,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) + + // IsDecreasingf asserts that the collection is decreasing + // +-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") ++// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -718,9 +802,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter + + // IsIncreasing asserts that the collection is increasing + // +-// a.IsIncreasing([]int{1, 2, 3}) +-// a.IsIncreasing([]float{1, 2}) +-// a.IsIncreasing([]string{"a", "b"}) ++// a.IsIncreasing([]int{1, 2, 3}) ++// a.IsIncreasing([]float{1, 2}) ++// a.IsIncreasing([]string{"a", "b"}) + func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -730,9 +814,9 @@ func (a *Assertions) 
IsIncreasing(object interface{}, msgAndArgs ...interface{}) + + // IsIncreasingf asserts that the collection is increasing + // +-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") ++// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -742,9 +826,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// a.IsNonDecreasing([]int{1, 1, 2}) +-// a.IsNonDecreasing([]float{1, 2}) +-// a.IsNonDecreasing([]string{"a", "b"}) ++// a.IsNonDecreasing([]int{1, 1, 2}) ++// a.IsNonDecreasing([]float{1, 2}) ++// a.IsNonDecreasing([]string{"a", "b"}) + func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -754,9 +838,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -766,9 +850,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in + + // IsNonIncreasing asserts that the collection is not increasing + // +-// a.IsNonIncreasing([]int{2, 1, 1}) +-// a.IsNonIncreasing([]float{2, 1}) +-// a.IsNonIncreasing([]string{"b", "a"}) ++// a.IsNonIncreasing([]int{2, 1, 1}) ++// a.IsNonIncreasing([]float{2, 1}) ++// a.IsNonIncreasing([]string{"b", "a"}) + func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -778,9 +862,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -806,7 +890,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s + + // JSONEq asserts that two JSON strings are equivalent. 
+ // +-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -816,7 +900,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -827,7 +911,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. + // Len asserts that the specified object has specific length. + // Len also fails if the object has a type that len() not accept. + // +-// a.Len(mySlice, 3) ++// a.Len(mySlice, 3) + func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -838,7 +922,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// a.Lenf(mySlice, 3, "error message %s", "formatted") ++// a.Lenf(mySlice, 3, "error message %s", "formatted") + func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -848,9 +932,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in + + // Less asserts that the first element is less than the second + // +-// a.Less(1, 2) +-// a.Less(float64(1), float64(2)) +-// a.Less("a", "b") ++// a.Less(1, 2) ++// a.Less(float64(1), float64(2)) ++// a.Less("a", "b") + func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -860,10 +944,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// a.LessOrEqual(1, 2) +-// a.LessOrEqual(2, 2) +-// a.LessOrEqual("a", "b") +-// a.LessOrEqual("b", "b") ++// a.LessOrEqual(1, 2) ++// a.LessOrEqual(2, 2) ++// a.LessOrEqual("a", "b") ++// a.LessOrEqual("b", "b") + func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -873,10 +957,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// a.LessOrEqualf(1, 2, "error message %s", "formatted") +-// a.LessOrEqualf(2, 2, "error message %s", "formatted") +-// a.LessOrEqualf("a", "b", "error message %s", "formatted") +-// a.LessOrEqualf("b", "b", "error message %s", "formatted") ++// a.LessOrEqualf(1, 2, "error message %s", "formatted") ++// a.LessOrEqualf(2, 2, "error message %s", "formatted") ++// a.LessOrEqualf("a", "b", "error message %s", "formatted") ++// a.LessOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, 
msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -886,9 +970,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar + + // Lessf asserts that the first element is less than the second + // +-// a.Lessf(1, 2, "error message %s", "formatted") +-// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +-// a.Lessf("a", "b", "error message %s", "formatted") ++// a.Lessf(1, 2, "error message %s", "formatted") ++// a.Lessf(float64(1), float64(2), "error message %s", "formatted") ++// a.Lessf("a", "b", "error message %s", "formatted") + func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -898,8 +982,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i + + // Negative asserts that the specified element is negative + // +-// a.Negative(-1) +-// a.Negative(-1.23) ++// a.Negative(-1) ++// a.Negative(-1.23) + func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -909,8 +993,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + + // Negativef asserts that the specified element is negative + // +-// a.Negativef(-1, "error message %s", "formatted") +-// a.Negativef(-1.23, "error message %s", "formatted") ++// a.Negativef(-1, "error message %s", "formatted") ++// a.Negativef(-1.23, "error message %s", "formatted") + func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -921,7 +1005,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) ++// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -932,7 +1016,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -942,7 +1026,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t + + // Nil asserts that the specified object is nil. + // +-// a.Nil(err) ++// a.Nil(err) + func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -952,7 +1036,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + + // Nilf asserts that the specified object is nil. 
+ // +-// a.Nilf(err, "error message %s", "formatted") ++// a.Nilf(err, "error message %s", "formatted") + func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -980,10 +1064,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoError(err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoError(err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -993,10 +1077,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoErrorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoErrorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1025,9 +1109,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContains("Hello World", "Earth") +-// a.NotContains(["Hello", "World"], "Earth") +-// a.NotContains({"Hello": "World"}, "Earth") ++// a.NotContains("Hello World", "Earth") ++// a.NotContains(["Hello", "World"], "Earth") ++// a.NotContains({"Hello": "World"}, "Earth") + func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1038,9 +1122,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") ++// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") ++// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") ++// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") + func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1051,9 +1135,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// if a.NotEmpty(obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmpty(obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1064,9 +1148,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmptyf(obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmptyf(obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1076,7 +1160,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface + + // NotEqual asserts that the specified values are NOT equal. + // +-// a.NotEqual(obj1, obj2) ++// a.NotEqual(obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1089,7 +1173,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValues(obj1, obj2) ++// a.NotEqualValues(obj1, obj2) + func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1099,7 +1183,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") + func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1109,7 +1193,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m + + // NotEqualf asserts that the specified values are NOT equal. + // +-// a.NotEqualf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualf(obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1140,7 +1224,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in + + // NotNil asserts that the specified object is not nil. + // +-// a.NotNil(err) ++// a.NotNil(err) + func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1150,7 +1234,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + + // NotNilf asserts that the specified object is not nil. + // +-// a.NotNilf(err, "error message %s", "formatted") ++// a.NotNilf(err, "error message %s", "formatted") + func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1160,7 +1244,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. 
+ // +-// a.NotPanics(func(){ RemainCalm() }) ++// a.NotPanics(func(){ RemainCalm() }) + func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1170,7 +1254,7 @@ func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{} + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") ++// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") + func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1180,8 +1264,8 @@ func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...inte + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +-// a.NotRegexp("^start", "it's not starting") ++// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") ++// a.NotRegexp("^start", "it's not starting") + func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1191,8 +1275,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") ++// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") + func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1202,7 +1286,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg + + // NotSame asserts that two pointers do not reference the same object. + // +-// a.NotSame(ptr1, ptr2) ++// a.NotSame(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1215,7 +1299,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg + + // NotSamef asserts that two pointers do not reference the same object. + // +-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") ++// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1229,7 +1313,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1240,7 +1324,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs + // NotSubsetf asserts that the specified list(array, slice...) 
contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1266,7 +1350,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// a.Panics(func(){ GoCrazy() }) ++// a.Panics(func(){ GoCrazy() }) + func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1278,7 +1362,7 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithError("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithError("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1290,7 +1374,7 @@ func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, m + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1301,7 +1385,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1312,7 +1396,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFun + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1322,7 +1406,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFu + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") ++// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1332,8 +1416,8 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa + + // Positive asserts that the specified element is positive + // +-// a.Positive(1) +-// a.Positive(1.23) ++// a.Positive(1) ++// a.Positive(1.23) + func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1343,8 +1427,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + + // Positivef asserts that the specified element is positive + // +-// a.Positivef(1, "error message %s", "formatted") +-// a.Positivef(1.23, "error message %s", "formatted") ++// a.Positivef(1, "error message %s", "formatted") ++// a.Positivef(1.23, "error message %s", "formatted") + func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1354,8 +1438,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + + // Regexp asserts that a specified regexp matches a string. + // +-// a.Regexp(regexp.MustCompile("start"), "it's starting") +-// a.Regexp("start...$", "it's not starting") ++// a.Regexp(regexp.MustCompile("start"), "it's starting") ++// a.Regexp("start...$", "it's not starting") + func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1365,8 +1449,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter + + // Regexpf asserts that a specified regexp matches a string. + // +-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") ++// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") + func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1376,7 +1460,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . + + // Same asserts that two pointers reference the same object. + // +-// a.Same(ptr1, ptr2) ++// a.Same(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1389,7 +1473,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . + + // Samef asserts that two pointers reference the same object. + // +-// a.Samef(ptr1, ptr2, "error message %s", "formatted") ++// a.Samef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1403,7 +1487,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1414,7 +1498,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1424,7 +1508,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a + + // True asserts that the specified value is true. + // +-// a.True(myBool) ++// a.True(myBool) + func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1434,7 +1518,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + + // Truef asserts that the specified value is true. + // +-// a.Truef(myBool, "error message %s", "formatted") ++// a.Truef(myBool, "error message %s", "formatted") + func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1444,7 +1528,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) ++// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) + func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1454,7 +1538,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1464,7 +1548,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1474,7 +1558,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim + + // WithinRangef asserts that a time is within a time range (inclusive). 
+ // +-// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +new file mode 100644 +index 00000000000..67f8d733999 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +@@ -0,0 +1,229 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "go.opentelemetry.io/otel" ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/propagation" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++const ( ++ // ScopeName is the instrumentation scope name. ++ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. ++ GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++// Filter is a predicate used to determine whether a given request in ++// interceptor info should be traced. A Filter must return true if ++// the request should be traced. ++type Filter func(*InterceptorInfo) bool ++ ++// config is a group of options for this instrumentation. ++type config struct { ++ Filter Filter ++ Propagators propagation.TextMapPropagator ++ TracerProvider trace.TracerProvider ++ MeterProvider metric.MeterProvider ++ SpanStartOptions []trace.SpanStartOption ++ ++ ReceivedEvent bool ++ SentEvent bool ++ ++ tracer trace.Tracer ++ meter metric.Meter ++ ++ rpcDuration metric.Float64Histogram ++ rpcRequestSize metric.Int64Histogram ++ rpcResponseSize metric.Int64Histogram ++ rpcRequestsPerRPC metric.Int64Histogram ++ rpcResponsesPerRPC metric.Int64Histogram ++} ++ ++// Option applies an option value for a config. ++type Option interface { ++ apply(*config) ++} ++ ++// newConfig returns a config configured with all the passed Options. 
++func newConfig(opts []Option, role string) *config { ++ c := &config{ ++ Propagators: otel.GetTextMapPropagator(), ++ TracerProvider: otel.GetTracerProvider(), ++ MeterProvider: otel.GetMeterProvider(), ++ } ++ for _, o := range opts { ++ o.apply(c) ++ } ++ ++ c.tracer = c.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(SemVersion()), ++ ) ++ ++ c.meter = c.MeterProvider.Meter( ++ ScopeName, ++ metric.WithInstrumentationVersion(Version()), ++ metric.WithSchemaURL(semconv.SchemaURL), ++ ) ++ ++ var err error ++ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration", ++ metric.WithDescription("Measures the duration of inbound RPC."), ++ metric.WithUnit("ms")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", ++ metric.WithDescription("Measures size of RPC request messages (uncompressed)."), ++ metric.WithUnit("By")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", ++ metric.WithDescription("Measures size of RPC response messages (uncompressed)."), ++ metric.WithUnit("By")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", ++ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), ++ metric.WithUnit("{count}")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", ++ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), ++ metric.WithUnit("{count}")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ return c ++} ++ ++type propagatorsOption struct{ p propagation.TextMapPropagator } ++ ++func (o propagatorsOption) apply(c *config) { ++ if o.p != nil { ++ c.Propagators = o.p ++ } ++} ++ ++// WithPropagators returns an Option to use the Propagators when extracting ++// and injecting trace context from requests. ++func WithPropagators(p propagation.TextMapPropagator) Option { ++ return propagatorsOption{p: p} ++} ++ ++type tracerProviderOption struct{ tp trace.TracerProvider } ++ ++func (o tracerProviderOption) apply(c *config) { ++ if o.tp != nil { ++ c.TracerProvider = o.tp ++ } ++} ++ ++// WithInterceptorFilter returns an Option to use the request filter. ++// ++// Deprecated: Use stats handlers instead. ++func WithInterceptorFilter(f Filter) Option { ++ return interceptorFilterOption{f: f} ++} ++ ++type interceptorFilterOption struct { ++ f Filter ++} ++ ++func (o interceptorFilterOption) apply(c *config) { ++ if o.f != nil { ++ c.Filter = o.f ++ } ++} ++ ++// WithTracerProvider returns an Option to use the TracerProvider when ++// creating a Tracer. ++func WithTracerProvider(tp trace.TracerProvider) Option { ++ return tracerProviderOption{tp: tp} ++} ++ ++type meterProviderOption struct{ mp metric.MeterProvider } ++ ++func (o meterProviderOption) apply(c *config) { ++ if o.mp != nil { ++ c.MeterProvider = o.mp ++ } ++} ++ ++// WithMeterProvider returns an Option to use the MeterProvider when ++// creating a Meter. If this option is not provide the global MeterProvider will be used. ++func WithMeterProvider(mp metric.MeterProvider) Option { ++ return meterProviderOption{mp: mp} ++} ++ ++// Event type that can be recorded, see WithMessageEvents. 
++type Event int ++ ++// Different types of events that can be recorded, see WithMessageEvents. ++const ( ++ ReceivedEvents Event = iota ++ SentEvents ++) ++ ++type messageEventsProviderOption struct { ++ events []Event ++} ++ ++func (m messageEventsProviderOption) apply(c *config) { ++ for _, e := range m.events { ++ switch e { ++ case ReceivedEvents: ++ c.ReceivedEvent = true ++ case SentEvents: ++ c.SentEvent = true ++ } ++ } ++} ++ ++// WithMessageEvents configures the Handler to record the specified events ++// (span.AddEvent) on spans. By default only summary attributes are added at the ++// end of the request. ++// ++// Valid events are: ++// - ReceivedEvents: Record the number of bytes read after every gRPC read operation. ++// - SentEvents: Record the number of bytes written after every gRPC write operation. ++func WithMessageEvents(events ...Event) Option { ++ return messageEventsProviderOption{events: events} ++} ++ ++type spanStartOption struct{ opts []trace.SpanStartOption } ++ ++func (o spanStartOption) apply(c *config) { ++ c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) ++} ++ ++// WithSpanOptions configures an additional set of ++// trace.SpanOptions, which are applied to each new span. ++func WithSpanOptions(opts ...trace.SpanStartOption) Option { ++ return spanStartOption{opts} ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +new file mode 100644 +index 00000000000..958dcd87a4c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +@@ -0,0 +1,22 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++/* ++Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. ++ ++Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client. ++ ++Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server. ++*/ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go +deleted file mode 100644 +index f512cf6e315..00000000000 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go ++++ /dev/null +@@ -1,163 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. 
+-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +- +-import ( +- "context" +- +- "google.golang.org/grpc/metadata" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/baggage" +- "go.opentelemetry.io/otel/propagation" +- "go.opentelemetry.io/otel/trace" +-) +- +-const ( +- // instrumentationName is the name of this instrumentation package. +- instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +- // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. +- GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +-) +- +-// Filter is a predicate used to determine whether a given request in +-// interceptor info should be traced. A Filter must return true if +-// the request should be traced. +-type Filter func(*InterceptorInfo) bool +- +-// config is a group of options for this instrumentation. +-type config struct { +- Filter Filter +- Propagators propagation.TextMapPropagator +- TracerProvider trace.TracerProvider +-} +- +-// Option applies an option value for a config. +-type Option interface { +- apply(*config) +-} +- +-// newConfig returns a config configured with all the passed Options. +-func newConfig(opts []Option) *config { +- c := &config{ +- Propagators: otel.GetTextMapPropagator(), +- TracerProvider: otel.GetTracerProvider(), +- } +- for _, o := range opts { +- o.apply(c) +- } +- return c +-} +- +-type propagatorsOption struct{ p propagation.TextMapPropagator } +- +-func (o propagatorsOption) apply(c *config) { +- if o.p != nil { +- c.Propagators = o.p +- } +-} +- +-// WithPropagators returns an Option to use the Propagators when extracting +-// and injecting trace context from requests. +-func WithPropagators(p propagation.TextMapPropagator) Option { +- return propagatorsOption{p: p} +-} +- +-type tracerProviderOption struct{ tp trace.TracerProvider } +- +-func (o tracerProviderOption) apply(c *config) { +- if o.tp != nil { +- c.TracerProvider = o.tp +- } +-} +- +-// WithInterceptorFilter returns an Option to use the request filter. +-func WithInterceptorFilter(f Filter) Option { +- return interceptorFilterOption{f: f} +-} +- +-type interceptorFilterOption struct { +- f Filter +-} +- +-func (o interceptorFilterOption) apply(c *config) { +- if o.f != nil { +- c.Filter = o.f +- } +-} +- +-// WithTracerProvider returns an Option to use the TracerProvider when +-// creating a Tracer. +-func WithTracerProvider(tp trace.TracerProvider) Option { +- return tracerProviderOption{tp: tp} +-} +- +-type metadataSupplier struct { +- metadata *metadata.MD +-} +- +-// assert that metadataSupplier implements the TextMapCarrier interface. 
+-var _ propagation.TextMapCarrier = &metadataSupplier{} +- +-func (s *metadataSupplier) Get(key string) string { +- values := s.metadata.Get(key) +- if len(values) == 0 { +- return "" +- } +- return values[0] +-} +- +-func (s *metadataSupplier) Set(key string, value string) { +- s.metadata.Set(key, value) +-} +- +-func (s *metadataSupplier) Keys() []string { +- out := make([]string, 0, len(*s.metadata)) +- for key := range *s.metadata { +- out = append(out, key) +- } +- return out +-} +- +-// Inject injects correlation context and span context into the gRPC +-// metadata object. This function is meant to be used on outgoing +-// requests. +-func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { +- c := newConfig(opts) +- inject(ctx, md, c.Propagators) +-} +- +-func inject(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) { +- propagators.Inject(ctx, &metadataSupplier{ +- metadata: md, +- }) +-} +- +-// Extract returns the correlation context and span context that +-// another service encoded in the gRPC metadata object with Inject. +-// This function is meant to be used on incoming requests. +-func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { +- c := newConfig(opts) +- return extract(ctx, md, c.Propagators) +-} +- +-func extract(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) (baggage.Baggage, trace.SpanContext) { +- ctx = propagators.Extract(ctx, &metadataSupplier{ +- metadata: md, +- }) +- +- return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +-} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +index 26343dfc16e..fa015e9ac88 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +@@ -20,41 +20,37 @@ import ( + "context" + "io" + "net" +- +- "github.com/golang/protobuf/proto" // nolint:staticcheck ++ "strconv" ++ "time" + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" ++ "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/codes" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ "go.opentelemetry.io/otel/metric" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" + ) + + type messageType attribute.KeyValue + + // Event adds an event of the messageType to the span associated with the +-// passed context with id and size (if message is a proto message). +-func (m messageType) Event(ctx context.Context, id int, message interface{}) { ++// passed context with a message id. 
++func (m messageType) Event(ctx context.Context, id int, _ interface{}) { + span := trace.SpanFromContext(ctx) +- if p, ok := message.(proto.Message); ok { +- span.AddEvent("message", trace.WithAttributes( +- attribute.KeyValue(m), +- RPCMessageIDKey.Int(id), +- RPCMessageUncompressedSizeKey.Int(proto.Size(p)), +- )) +- } else { +- span.AddEvent("message", trace.WithAttributes( +- attribute.KeyValue(m), +- RPCMessageIDKey.Int(id), +- )) ++ if !span.IsRecording() { ++ return + } ++ span.AddEvent("message", trace.WithAttributes( ++ attribute.KeyValue(m), ++ RPCMessageIDKey.Int(id), ++ )) + } + + var ( +@@ -64,8 +60,15 @@ var ( + + // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable + // for use in a grpc.Dial call. ++// ++// Deprecated: Use [NewClientHandler] instead. + func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "client") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + method string, +@@ -82,32 +85,33 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + return invoker(ctx, method, req, reply, cc, callOpts...) + } + +- requestMetadata, _ := metadata.FromOutgoingContext(ctx) +- metadataCopy := requestMetadata.Copy() ++ name, attr, _ := telemetryAttributes(method, cc.Target()) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(method, cc.Target()) +- var span trace.Span +- ctx, span = tracer.Start( ++ ctx, span := tracer.Start( + ctx, + name, +- trace.WithSpanKind(trace.SpanKindClient), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- inject(ctx, &metadataCopy, cfg.Propagators) +- ctx = metadata.NewOutgoingContext(ctx, metadataCopy) ++ ctx = inject(ctx, cfg.Propagators) + +- messageSent.Event(ctx, 1, req) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, req) ++ } + + err := invoker(ctx, method, req, reply, cc, callOpts...) 
+ +- messageReceived.Event(ctx, 1, reply) ++ if cfg.ReceivedEvent { ++ messageReceived.Event(ctx, 1, reply) ++ } + + if err != nil { + s, _ := status.FromError(err) +@@ -143,6 +147,9 @@ type clientStream struct { + eventsDone chan struct{} + finished chan error + ++ receivedEvent bool ++ sentEvent bool ++ + receivedMessageID int + sentMessageID int + } +@@ -160,7 +167,10 @@ func (w *clientStream) RecvMsg(m interface{}) error { + w.sendStreamEvent(errorEvent, err) + } else { + w.receivedMessageID++ +- messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ ++ if w.receivedEvent { ++ messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ } + } + + return err +@@ -170,7 +180,10 @@ func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ +- messageSent.Event(w.Context(), w.sentMessageID, m) ++ ++ if w.sentEvent { ++ messageSent.Event(w.Context(), w.sentMessageID, m) ++ } + + if err != nil { + w.sendStreamEvent(errorEvent, err) +@@ -181,7 +194,6 @@ func (w *clientStream) SendMsg(m interface{}) error { + + func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() +- + if err != nil { + w.sendStreamEvent(errorEvent, err) + } +@@ -191,7 +203,6 @@ func (w *clientStream) Header() (metadata.MD, error) { + + func (w *clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() +- + if err != nil { + w.sendStreamEvent(errorEvent, err) + } +@@ -199,7 +210,7 @@ func (w *clientStream) CloseSend() error { + return err + } + +-func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream { ++func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream { + events := make(chan streamEvent) + eventsDone := make(chan struct{}) + finished := make(chan error) +@@ -226,11 +237,13 @@ func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.Strea + }() + + return &clientStream{ +- ClientStream: s, +- desc: desc, +- events: events, +- eventsDone: eventsDone, +- finished: finished, ++ ClientStream: s, ++ desc: desc, ++ events: events, ++ eventsDone: eventsDone, ++ finished: finished, ++ receivedEvent: cfg.ReceivedEvent, ++ sentEvent: cfg.SentEvent, + } + } + +@@ -243,8 +256,15 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { + + // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable + // for use in a grpc.Dial call. ++// ++// Deprecated: Use [NewClientHandler] instead. + func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "client") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + desc *grpc.StreamDesc, +@@ -261,25 +281,22 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + return streamer(ctx, desc, cc, method, callOpts...) 
+ } + +- requestMetadata, _ := metadata.FromOutgoingContext(ctx) +- metadataCopy := requestMetadata.Copy() ++ name, attr, _ := telemetryAttributes(method, cc.Target()) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(method, cc.Target()) +- var span trace.Span +- ctx, span = tracer.Start( ++ ctx, span := tracer.Start( + ctx, + name, +- trace.WithSpanKind(trace.SpanKindClient), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + +- inject(ctx, &metadataCopy, cfg.Propagators) +- ctx = metadata.NewOutgoingContext(ctx, metadataCopy) ++ ctx = inject(ctx, cfg.Propagators) + + s, err := streamer(ctx, desc, cc, method, callOpts...) + if err != nil { +@@ -289,7 +306,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + span.End() + return s, err + } +- stream := wrapClientStream(ctx, s, desc) ++ stream := wrapClientStream(ctx, s, desc, cfg) + + go func() { + err := <-stream.finished +@@ -311,8 +328,15 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + + // UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable + // for use in a grpc.NewServer call. ++// ++// Deprecated: Use [NewServerHandler] instead. + func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "server") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + req interface{}, +@@ -327,38 +351,49 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + return handler(ctx, req) + } + +- requestMetadata, _ := metadata.FromIncomingContext(ctx) +- metadataCopy := requestMetadata.Copy() +- +- bags, spanCtx := Extract(ctx, &metadataCopy, opts...) 
+- ctx = baggage.ContextWithBaggage(ctx, bags) ++ ctx = extract(ctx, cfg.Propagators) ++ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( +- trace.ContextWithRemoteSpanContext(ctx, spanCtx), ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, +- trace.WithSpanKind(trace.SpanKindServer), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- messageReceived.Event(ctx, 1, req) ++ if cfg.ReceivedEvent { ++ messageReceived.Event(ctx, 1, req) ++ } ++ ++ before := time.Now() + + resp, err := handler(ctx, req) ++ ++ s, _ := status.FromError(err) + if err != nil { +- s, _ := status.FromError(err) +- span.SetStatus(codes.Error, s.Message()) +- span.SetAttributes(statusCodeAttr(s.Code())) +- messageSent.Event(ctx, 1, s.Proto()) ++ statusCode, msg := serverStatus(s) ++ span.SetStatus(statusCode, msg) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, s.Proto()) ++ } + } else { +- span.SetAttributes(statusCodeAttr(grpc_codes.OK)) +- messageSent.Event(ctx, 1, resp) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, resp) ++ } + } ++ grpcStatusCodeAttr := statusCodeAttr(s.Code()) ++ span.SetAttributes(grpcStatusCodeAttr) ++ ++ elapsedTime := time.Since(before).Milliseconds() ++ metricAttrs = append(metricAttrs, grpcStatusCodeAttr) ++ cfg.rpcDuration.Record(ctx, float64(elapsedTime), metric.WithAttributes(metricAttrs...)) + + return resp, err + } +@@ -372,6 +407,9 @@ type serverStream struct { + + receivedMessageID int + sentMessageID int ++ ++ receivedEvent bool ++ sentEvent bool + } + + func (w *serverStream) Context() context.Context { +@@ -383,7 +421,9 @@ func (w *serverStream) RecvMsg(m interface{}) error { + + if err == nil { + w.receivedMessageID++ +- messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ if w.receivedEvent { ++ messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ } + } + + return err +@@ -393,22 +433,33 @@ func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ +- messageSent.Event(w.Context(), w.sentMessageID, m) ++ if w.sentEvent { ++ messageSent.Event(w.Context(), w.sentMessageID, m) ++ } + + return err + } + +-func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { ++func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { + return &serverStream{ +- ServerStream: ss, +- ctx: ctx, ++ ServerStream: ss, ++ ctx: ctx, ++ receivedEvent: cfg.ReceivedEvent, ++ sentEvent: cfg.SentEvent, + } + } + + // StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable + // for use in a grpc.NewServer call. ++// ++// Deprecated: Use [NewServerHandler] instead. 
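For comparison, the interceptor constructors being touched here keep working but are now deprecated in favour of the stats handlers; a sketch of the interceptor-based wiring they correspond to, again with a placeholder target and credentials:

    // Older interceptor-based wiring; still functional, but deprecated by this change.
    package main

    import (
        "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func dialWithInterceptors(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
            grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
        )
    }

    func newServerWithInterceptors() *grpc.Server {
        return grpc.NewServer(
            grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
            grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
        )
    }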
+ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "server") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + srv interface{}, + ss grpc.ServerStream, +@@ -421,34 +472,31 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { +- return handler(srv, wrapServerStream(ctx, ss)) ++ return handler(srv, wrapServerStream(ctx, ss, cfg)) + } + +- requestMetadata, _ := metadata.FromIncomingContext(ctx) +- metadataCopy := requestMetadata.Copy() +- +- bags, spanCtx := Extract(ctx, &metadataCopy, opts...) +- ctx = baggage.ContextWithBaggage(ctx, bags) ++ ctx = extract(ctx, cfg.Propagators) ++ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( +- trace.ContextWithRemoteSpanContext(ctx, spanCtx), ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, +- trace.WithSpanKind(trace.SpanKindServer), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- err := handler(srv, wrapServerStream(ctx, ss)) +- ++ err := handler(srv, wrapServerStream(ctx, ss, cfg)) + if err != nil { + s, _ := status.FromError(err) +- span.SetStatus(codes.Error, s.Message()) ++ statusCode, msg := serverStatus(s) ++ span.SetStatus(statusCode, msg) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) +@@ -458,31 +506,49 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + } + } + +-// spanInfo returns a span name and all appropriate attributes from the gRPC +-// method and peer address. +-func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) { +- attrs := []attribute.KeyValue{RPCSystemGRPC} +- name, mAttrs := internal.ParseFullMethod(fullMethod) +- attrs = append(attrs, mAttrs...) +- attrs = append(attrs, peerAttr(peerAddress)...) +- return name, attrs ++// telemetryAttributes returns a span name and span and metric attributes from ++// the gRPC method and peer address. ++func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) { ++ name, methodAttrs := internal.ParseFullMethod(fullMethod) ++ peerAttrs := peerAttr(peerAddress) ++ ++ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs)) ++ attrs = append(attrs, RPCSystemGRPC) ++ attrs = append(attrs, methodAttrs...) ++ metricAttrs := attrs[:1+len(methodAttrs)] ++ attrs = append(attrs, peerAttrs...) ++ return name, attrs, metricAttrs + } + + // peerAttr returns attributes about the peer address. 
+ func peerAttr(addr string) []attribute.KeyValue { +- host, port, err := net.SplitHostPort(addr) ++ host, p, err := net.SplitHostPort(addr) + if err != nil { +- return []attribute.KeyValue(nil) ++ return nil + } + + if host == "" { + host = "127.0.0.1" + } ++ port, err := strconv.Atoi(p) ++ if err != nil { ++ return nil ++ } + +- return []attribute.KeyValue{ +- semconv.NetPeerIPKey.String(host), +- semconv.NetPeerPortKey.String(port), ++ var attr []attribute.KeyValue ++ if ip := net.ParseIP(host); ip != nil { ++ attr = []attribute.KeyValue{ ++ semconv.NetSockPeerAddr(host), ++ semconv.NetSockPeerPort(port), ++ } ++ } else { ++ attr = []attribute.KeyValue{ ++ semconv.NetPeerName(host), ++ semconv.NetPeerPort(port), ++ } + } ++ ++ return attr + } + + // peerFromCtx returns a peer address from a context, if one exists. +@@ -498,3 +564,26 @@ func peerFromCtx(ctx context.Context) string { + func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) + } ++ ++// serverStatus returns a span status code and message for a given gRPC ++// status code. It maps specific gRPC status codes to a corresponding span ++// status code and message. This function is intended for use on the server ++// side of a gRPC connection. ++// ++// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented, ++// Internal, Unavailable, or DataLoss, it returns a span status code of Error ++// and the message from the gRPC status. Otherwise, it returns a span status ++// code of Unset and an empty message. ++func serverStatus(grpcStatus *status.Status) (codes.Code, string) { ++ switch grpcStatus.Code() { ++ case grpc_codes.Unknown, ++ grpc_codes.DeadlineExceeded, ++ grpc_codes.Unimplemented, ++ grpc_codes.Internal, ++ grpc_codes.Unavailable, ++ grpc_codes.DataLoss: ++ return codes.Error, grpcStatus.Message() ++ default: ++ return codes.Unset, "" ++ } ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +index bc214d363a2..cf32a9e978c 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +@@ -18,26 +18,34 @@ import ( + "strings" + + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + ) + + // ParseFullMethod returns a span name following the OpenTelemetry semantic + // conventions as well as all applicable span attribute.KeyValue attributes based + // on a gRPC's FullMethod. ++// ++// Parsing is consistent with grpc-go implementation: ++// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39 + func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { +- name := strings.TrimLeft(fullMethod, "/") +- parts := strings.SplitN(name, "/", 2) +- if len(parts) != 2 { ++ if !strings.HasPrefix(fullMethod, "/") { ++ // Invalid format, does not follow `/package.service/method`. ++ return fullMethod, nil ++ } ++ name := fullMethod[1:] ++ pos := strings.LastIndex(name, "/") ++ if pos < 0 { + // Invalid format, does not follow `/package.service/method`. 
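The rewritten peerAttr above distinguishes literal IPs from host names and now requires a numeric port; a hypothetical in-package test sketch spelling that out (peerAttr is unexported, so this is illustration only, not part of the change):

    package otelgrpc

    import (
        "reflect"
        "testing"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
    )

    func TestPeerAttrSketch(t *testing.T) {
        cases := []struct {
            addr string
            want []attribute.KeyValue
        }{
            // Literal IP: socket-level peer attributes.
            {"10.1.2.3:8080", []attribute.KeyValue{
                semconv.NetSockPeerAddr("10.1.2.3"),
                semconv.NetSockPeerPort(8080),
            }},
            // Host name: logical peer attributes.
            {"example.com:443", []attribute.KeyValue{
                semconv.NetPeerName("example.com"),
                semconv.NetPeerPort(443),
            }},
            // Non-numeric port: no attributes at all.
            {"example.com:http", nil},
        }
        for _, c := range cases {
            if got := peerAttr(c.addr); !reflect.DeepEqual(got, c.want) {
                t.Errorf("peerAttr(%q) = %v, want %v", c.addr, got, c.want)
            }
        }
    }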
+- return name, []attribute.KeyValue(nil) ++ return name, nil + } ++ service, method := name[:pos], name[pos+1:] + + var attrs []attribute.KeyValue +- if service := parts[0]; service != "" { +- attrs = append(attrs, semconv.RPCServiceKey.String(service)) ++ if service != "" { ++ attrs = append(attrs, semconv.RPCService(service)) + } +- if method := parts[1]; method != "" { +- attrs = append(attrs, semconv.RPCMethodKey.String(method)) ++ if method != "" { ++ attrs = append(attrs, semconv.RPCMethod(method)) + } + return name, attrs + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +new file mode 100644 +index 00000000000..f585fb6ae0c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +@@ -0,0 +1,98 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "context" ++ ++ "google.golang.org/grpc/metadata" ++ ++ "go.opentelemetry.io/otel/baggage" ++ "go.opentelemetry.io/otel/propagation" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++type metadataSupplier struct { ++ metadata *metadata.MD ++} ++ ++// assert that metadataSupplier implements the TextMapCarrier interface. ++var _ propagation.TextMapCarrier = &metadataSupplier{} ++ ++func (s *metadataSupplier) Get(key string) string { ++ values := s.metadata.Get(key) ++ if len(values) == 0 { ++ return "" ++ } ++ return values[0] ++} ++ ++func (s *metadataSupplier) Set(key string, value string) { ++ s.metadata.Set(key, value) ++} ++ ++func (s *metadataSupplier) Keys() []string { ++ out := make([]string, 0, len(*s.metadata)) ++ for key := range *s.metadata { ++ out = append(out, key) ++ } ++ return out ++} ++ ++// Inject injects correlation context and span context into the gRPC ++// metadata object. This function is meant to be used on outgoing ++// requests. ++// Deprecated: Unnecessary public func. ++func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { ++ c := newConfig(opts, "") ++ c.Propagators.Inject(ctx, &metadataSupplier{ ++ metadata: md, ++ }) ++} ++ ++func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { ++ md, ok := metadata.FromOutgoingContext(ctx) ++ if !ok { ++ md = metadata.MD{} ++ } ++ propagators.Inject(ctx, &metadataSupplier{ ++ metadata: &md, ++ }) ++ return metadata.NewOutgoingContext(ctx, md) ++} ++ ++// Extract returns the correlation context and span context that ++// another service encoded in the gRPC metadata object with Inject. ++// This function is meant to be used on incoming requests. ++// Deprecated: Unnecessary public func. 
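Similarly, a hypothetical test sketch inside the internal package spelling out what the rewritten ParseFullMethod above produces; the sample method name is an arbitrary example:

    package internal

    import (
        "reflect"
        "testing"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
    )

    func TestParseFullMethodSketch(t *testing.T) {
        // Well-formed input: "/package.service/method".
        name, attrs := ParseFullMethod("/helloworld.Greeter/SayHello")
        if name != "helloworld.Greeter/SayHello" {
            t.Errorf("unexpected span name %q", name)
        }
        want := []attribute.KeyValue{
            semconv.RPCService("helloworld.Greeter"),
            semconv.RPCMethod("SayHello"),
        }
        if !reflect.DeepEqual(attrs, want) {
            t.Errorf("unexpected attributes %v", attrs)
        }

        // Malformed input (no leading slash) comes back unchanged, with no attributes.
        if name, attrs := ParseFullMethod("no-slash"); name != "no-slash" || attrs != nil {
            t.Errorf("unexpected result for malformed input: %q %v", name, attrs)
        }
    }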
++func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { ++ c := newConfig(opts, "") ++ ctx = c.Propagators.Extract(ctx, &metadataSupplier{ ++ metadata: md, ++ }) ++ ++ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) ++} ++ ++func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { ++ md, ok := metadata.FromIncomingContext(ctx) ++ if !ok { ++ md = metadata.MD{} ++ } ++ ++ return propagators.Extract(ctx, &metadataSupplier{ ++ metadata: &md, ++ }) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +index 611c7f3017a..b65fab308f3 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +@@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g + + import ( + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + ) + + // Semantic conventions for attribute keys for gRPC. +@@ -41,7 +41,7 @@ const ( + // Semantic conventions for common RPC attributes. + var ( + // Semantic convention for gRPC as the remoting system. +- RPCSystemGRPC = semconv.RPCSystemKey.String("grpc") ++ RPCSystemGRPC = semconv.RPCSystemGRPC + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +new file mode 100644 +index 00000000000..0211e55e003 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +@@ -0,0 +1,235 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "context" ++ "sync/atomic" ++ "time" ++ ++ grpc_codes "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/stats" ++ "google.golang.org/grpc/status" ++ ++ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/metric" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++type gRPCContextKey struct{} ++ ++type gRPCContext struct { ++ messagesReceived int64 ++ messagesSent int64 ++ metricAttrs []attribute.KeyValue ++} ++ ++type serverHandler struct { ++ *config ++} ++ ++// NewServerHandler creates a stats.Handler for gRPC server. 
++func NewServerHandler(opts ...Option) stats.Handler { ++ h := &serverHandler{ ++ config: newConfig(opts, "server"), ++ } ++ ++ return h ++} ++ ++// TagConn can attach some information to the given context. ++func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { ++ span := trace.SpanFromContext(ctx) ++ attrs := peerAttr(peerFromCtx(ctx)) ++ span.SetAttributes(attrs...) ++ return ctx ++} ++ ++// HandleConn processes the Conn stats. ++func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { ++} ++ ++// TagRPC can attach some information to the given context. ++func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { ++ ctx = extract(ctx, h.config.Propagators) ++ ++ name, attrs := internal.ParseFullMethod(info.FullMethodName) ++ attrs = append(attrs, RPCSystemGRPC) ++ ctx, _ = h.tracer.Start( ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), ++ name, ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attrs...), ++ ) ++ ++ gctx := gRPCContext{ ++ metricAttrs: attrs, ++ } ++ return context.WithValue(ctx, gRPCContextKey{}, &gctx) ++} ++ ++// HandleRPC processes the RPC stats. ++func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { ++ h.handleRPC(ctx, rs) ++} ++ ++type clientHandler struct { ++ *config ++} ++ ++// NewClientHandler creates a stats.Handler for gRPC client. ++func NewClientHandler(opts ...Option) stats.Handler { ++ h := &clientHandler{ ++ config: newConfig(opts, "client"), ++ } ++ ++ return h ++} ++ ++// TagRPC can attach some information to the given context. ++func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { ++ name, attrs := internal.ParseFullMethod(info.FullMethodName) ++ attrs = append(attrs, RPCSystemGRPC) ++ ctx, _ = h.tracer.Start( ++ ctx, ++ name, ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attrs...), ++ ) ++ ++ gctx := gRPCContext{ ++ metricAttrs: attrs, ++ } ++ ++ return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) ++} ++ ++// HandleRPC processes the RPC stats. ++func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { ++ h.handleRPC(ctx, rs) ++} ++ ++// TagConn can attach some information to the given context. ++func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { ++ span := trace.SpanFromContext(ctx) ++ attrs := peerAttr(cti.RemoteAddr.String()) ++ span.SetAttributes(attrs...) ++ return ctx ++} ++ ++// HandleConn processes the Conn stats. ++func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { ++ // no-op ++} ++ ++func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { ++ span := trace.SpanFromContext(ctx) ++ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) ++ var messageId int64 ++ metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) ++ metricAttrs = append(metricAttrs, gctx.metricAttrs...) 
++ wctx := withoutCancel(ctx) ++ ++ switch rs := rs.(type) { ++ case *stats.Begin: ++ case *stats.InPayload: ++ if gctx != nil { ++ messageId = atomic.AddInt64(&gctx.messagesReceived, 1) ++ c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) ++ } ++ ++ if c.ReceivedEvent { ++ span.AddEvent("message", ++ trace.WithAttributes( ++ semconv.MessageTypeReceived, ++ semconv.MessageIDKey.Int64(messageId), ++ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), ++ semconv.MessageUncompressedSizeKey.Int(rs.Length), ++ ), ++ ) ++ } ++ case *stats.OutPayload: ++ if gctx != nil { ++ messageId = atomic.AddInt64(&gctx.messagesSent, 1) ++ c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) ++ } ++ ++ if c.SentEvent { ++ span.AddEvent("message", ++ trace.WithAttributes( ++ semconv.MessageTypeSent, ++ semconv.MessageIDKey.Int64(messageId), ++ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), ++ semconv.MessageUncompressedSizeKey.Int(rs.Length), ++ ), ++ ) ++ } ++ case *stats.OutTrailer: ++ case *stats.End: ++ var rpcStatusAttr attribute.KeyValue ++ ++ if rs.Error != nil { ++ s, _ := status.FromError(rs.Error) ++ span.SetStatus(codes.Error, s.Message()) ++ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) ++ } else { ++ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) ++ } ++ span.SetAttributes(rpcStatusAttr) ++ span.End() ++ ++ metricAttrs = append(metricAttrs, rpcStatusAttr) ++ c.rpcDuration.Record(wctx, float64(rs.EndTime.Sub(rs.BeginTime)), metric.WithAttributes(metricAttrs...)) ++ c.rpcRequestsPerRPC.Record(wctx, gctx.messagesReceived, metric.WithAttributes(metricAttrs...)) ++ c.rpcResponsesPerRPC.Record(wctx, gctx.messagesSent, metric.WithAttributes(metricAttrs...)) ++ ++ default: ++ return ++ } ++} ++ ++func withoutCancel(parent context.Context) context.Context { ++ if parent == nil { ++ panic("cannot create context from nil parent") ++ } ++ return withoutCancelCtx{parent} ++} ++ ++type withoutCancelCtx struct { ++ c context.Context ++} ++ ++func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) { ++ return ++} ++ ++func (withoutCancelCtx) Done() <-chan struct{} { ++ return nil ++} ++ ++func (withoutCancelCtx) Err() error { ++ return nil ++} ++ ++func (w withoutCancelCtx) Value(key any) any { ++ return w.c.Value(key) ++} ++ ++func (w withoutCancelCtx) String() string { ++ return "withoutCancel" ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +index bf6b2aa1c11..5c13a7ceab4 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +@@ -16,11 +16,13 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g + + // Version is the current release version of the gRPC instrumentation. + func Version() string { +- return "0.35.0" ++ return "0.46.0" + // This string is updated by the pre_release.sh script during release + } + + // SemVersion is the semantic version to be supplied to tracer/meter creation. ++// ++// Deprecated: Use [Version] instead. 
+ func SemVersion() string { +- return "semver:" + Version() ++ return Version() + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +index 728be09d0e0..303e5505e41 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +@@ -42,5 +42,5 @@ const ( + type Filter func(*http.Request) bool + + func newTracer(tp trace.TracerProvider) trace.Tracer { +- return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion())) ++ return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +index d0337f3a5e4..e4fa1b8d9d6 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +@@ -21,7 +21,6 @@ import ( + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + ) +@@ -33,6 +32,7 @@ const ( + // config represents the configuration options available for the http.Handler + // and http.Transport types. + type config struct { ++ ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator +@@ -64,7 +64,7 @@ func (o optionFunc) apply(c *config) { + func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), +- MeterProvider: global.MeterProvider(), ++ MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) +@@ -77,7 +77,7 @@ func newConfig(opts ...Option) *config { + + c.Meter = c.MeterProvider.Meter( + instrumentationName, +- metric.WithInstrumentationVersion(SemVersion()), ++ metric.WithInstrumentationVersion(Version()), + ) + + return c +@@ -198,3 +198,11 @@ func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + c.ClientTrace = f + }) + } ++ ++// WithServerName returns an Option that sets the name of the (virtual) server ++// handling requests. 
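Since the handler configuration now defaults to otel.GetMeterProvider() (and, as before, to the global tracer provider and propagators), the instrumentation falls back to no-ops until global providers are registered; a small setup sketch, where tp and mp stand for providers built elsewhere:

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        "go.opentelemetry.io/otel/propagation"
        "go.opentelemetry.io/otel/trace"
    )

    // setupGlobals registers the providers that otelgrpc and otelhttp pick up
    // by default. tp and mp are assumed to be built elsewhere (SDK or no-op).
    func setupGlobals(tp trace.TracerProvider, mp metric.MeterProvider) {
        otel.SetTracerProvider(tp)
        otel.SetMeterProvider(mp)
        otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
            propagation.TraceContext{},
            propagation.Baggage{},
        ))
    }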
++func WithServerName(server string) Option { ++ return optionFunc(func(c *config) { ++ c.ServerName = server ++ }) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +index 5b7d9daafa7..b2fbe07841c 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +@@ -21,25 +21,19 @@ import ( + + "github.com/felixge/httpsnoop" + ++ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/propagation" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" + ) + +-var _ http.Handler = &Handler{} +- +-// Handler is http middleware that corresponds to the http.Handler interface and +-// is designed to wrap a http.Mux (or equivalent), while individual routes on +-// the mux are wrapped with WithRouteTag. A Handler will add various attributes +-// to the span using the attribute.Keys defined in this package. +-type Handler struct { ++// middleware is an http middleware which wraps the next handler in a span. ++type middleware struct { + operation string +- handler http.Handler ++ server string + + tracer trace.Tracer + meter metric.Meter +@@ -49,8 +43,8 @@ type Handler struct { + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string +- counters map[string]syncint64.Counter +- valueRecorders map[string]syncfloat64.Histogram ++ counters map[string]metric.Int64Counter ++ valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + } +@@ -59,11 +53,17 @@ func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation + } + +-// NewHandler wraps the passed handler, functioning like middleware, in a span +-// named after the operation and with any provided Options. ++// NewHandler wraps the passed handler in a span named after the operation and ++// enriches it with metrics. + func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { +- h := Handler{ +- handler: handler, ++ return NewMiddleware(operation, opts...)(handler) ++} ++ ++// NewMiddleware returns a tracing and metrics instrumentation middleware. ++// The handler returned by the middleware wraps a handler ++// in a span named after the operation and enriches it with metrics. 
++func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { ++ h := middleware{ + operation: operation, + } + +@@ -76,10 +76,14 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han + h.configure(c) + h.createMeasures() + +- return &h ++ return func(next http.Handler) http.Handler { ++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ++ h.serveHTTP(w, r, next) ++ }) ++ } + } + +-func (h *Handler) configure(c *config) { ++func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators +@@ -90,6 +94,7 @@ func (h *Handler) configure(c *config) { + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn ++ h.server = c.ServerName + } + + func handleErr(err error) { +@@ -98,17 +103,17 @@ func handleErr(err error) { + } + } + +-func (h *Handler) createMeasures() { +- h.counters = make(map[string]syncint64.Counter) +- h.valueRecorders = make(map[string]syncfloat64.Histogram) ++func (h *middleware) createMeasures() { ++ h.counters = make(map[string]metric.Int64Counter) ++ h.valueRecorders = make(map[string]metric.Float64Histogram) + +- requestBytesCounter, err := h.meter.SyncInt64().Counter(RequestContentLength) ++ requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + handleErr(err) + +- responseBytesCounter, err := h.meter.SyncInt64().Counter(ResponseContentLength) ++ responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + handleErr(err) + +- serverLatencyMeasure, err := h.meter.SyncFloat64().Histogram(ServerLatency) ++ serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter +@@ -116,19 +121,27 @@ func (h *Handler) createMeasures() { + h.valueRecorders[ServerLatency] = serverLatencyMeasure + } + +-// ServeHTTP serves HTTP requests (http.Handler). +-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ++// serveHTTP sets up tracing and calls the given next http.Handler with the span ++// context injected into the request context. ++func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request +- h.handler.ServeHTTP(w, r) ++ next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) +- opts := h.spanStartOptions ++ opts := []trace.SpanStartOption{ ++ trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), ++ } ++ if h.server != "" { ++ hostAttr := semconv.NetHostName(h.server) ++ opts = append(opts, trace.WithAttributes(hostAttr)) ++ } ++ opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. +@@ -137,12 +150,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- opts = append([]trace.SpanStartOption{ +- trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...), +- trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...), +- trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...), +- }, opts...) 
// start with the configured options +- + tracer := h.tracer + + if tracer == nil { +@@ -180,7 +187,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} ++ rww := &respWriterWrapper{ ++ ResponseWriter: w, ++ record: writeRecordFunc, ++ ctx: ctx, ++ props: h.propagators, ++ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything ++ } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, +@@ -201,19 +214,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + +- h.handler.ServeHTTP(w, r.WithContext(ctx)) ++ next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics +- attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...) +- h.counters[RequestContentLength].Add(ctx, bw.read, attributes...) +- h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...) ++ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) ++ if rww.statusCode > 0 { ++ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) ++ } ++ o := metric.WithAttributes(attributes...) ++ h.counters[RequestContentLength].Add(ctx, bw.read, o) ++ h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + +- h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...) ++ h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) + } + + func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { +@@ -231,21 +248,28 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { +- attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) +- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) ++ attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } ++ span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) ++ + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) + } + +-// WithRouteTag annotates a span with the provided route name using the +-// RouteKey Tag. ++// WithRouteTag annotates spans and metrics with the provided route name ++// with HTTP route attribute. 
+ func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ++ attr := semconv.HTTPRouteKey.String(route) ++ + span := trace.SpanFromContext(r.Context()) +- span.SetAttributes(semconv.HTTPRouteKey.String(route)) ++ span.SetAttributes(attr) ++ ++ labeler, _ := LabelerFromContext(r.Context()) ++ labeler.Add(attr) ++ + h.ServeHTTP(w, r) + }) + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +new file mode 100644 +index 00000000000..edf4ce3d315 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +@@ -0,0 +1,21 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++// Generate semconvutil package: ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +new file mode 100644 +index 00000000000..d3dede9ebbd +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +@@ -0,0 +1,552 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/semconvutil/httpconv.go.tmpl ++ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
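Putting the reworked otelhttp pieces together (NewHandler now built on NewMiddleware, plus WithServerName and the metric-aware WithRouteTag), a small server-side sketch; the route, operation name, and server name are placeholder assumptions:

    package main

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func newInstrumentedMux() http.Handler {
        mux := http.NewServeMux()

        // Tag the route so spans and request metrics carry a low-cardinality name.
        mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id", http.HandlerFunc(
            func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) },
        )))

        // Wrap the whole mux in one instrumented handler.
        return otelhttp.NewHandler(mux, "http.server",
            otelhttp.WithServerName("api.example.internal"))

        // Equivalent form, useful when several handlers share the same options:
        //   mw := otelhttp.NewMiddleware("http.server", otelhttp.WithServerName("api.example.internal"))
        //   return mw(mux)
    }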
++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++import ( ++ "fmt" ++ "net/http" ++ "strings" ++ ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++) ++ ++// HTTPClientResponse returns trace attributes for an HTTP response received by a ++// client from a server. It will return the following attributes if the related ++// values are defined in resp: "http.status.code", ++// "http.response_content_length". ++// ++// This does not add all OpenTelemetry required attributes for an HTTP event, ++// it assumes ClientRequest was used to create the span with a complete set of ++// attributes. If a complete set of attributes can be generated using the ++// request contained in resp. For example: ++// ++// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) ++func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { ++ return hc.ClientResponse(resp) ++} ++ ++// HTTPClientRequest returns trace attributes for an HTTP request made by a client. ++// The following attributes are always returned: "http.url", "http.flavor", ++// "http.method", "net.peer.name". The following attributes are returned if the ++// related values are defined in req: "net.peer.port", "http.user_agent", ++// "http.request_content_length", "enduser.id". ++func HTTPClientRequest(req *http.Request) []attribute.KeyValue { ++ return hc.ClientRequest(req) ++} ++ ++// HTTPClientStatus returns a span status code and message for an HTTP status code ++// value received by a client. ++func HTTPClientStatus(code int) (codes.Code, string) { ++ return hc.ClientStatus(code) ++} ++ ++// HTTPServerRequest returns trace attributes for an HTTP request received by a ++// server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "http.target", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port", ++// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", ++// "http.client_ip". ++func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { ++ return hc.ServerRequest(server, req) ++} ++ ++// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a ++// server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. 
More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port". ++func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { ++ return hc.ServerRequestMetrics(server, req) ++} ++ ++// HTTPServerStatus returns a span status code and message for an HTTP status code ++// value returned by a server. Status codes in the 400-499 range are not ++// returned as errors. ++func HTTPServerStatus(code int) (codes.Code, string) { ++ return hc.ServerStatus(code) ++} ++ ++// HTTPRequestHeader returns the contents of h as attributes. ++// ++// Instrumentation should require an explicit configuration of which headers to ++// captured and then prune what they pass here. Including all headers can be a ++// security risk - explicit configuration helps avoid leaking sensitive ++// information. ++// ++// The User-Agent header is already captured in the http.user_agent attribute ++// from ClientRequest and ServerRequest. Instrumentation may provide an option ++// to capture that header here even though it is not recommended. Otherwise, ++// instrumentation should filter that out of what is passed. ++func HTTPRequestHeader(h http.Header) []attribute.KeyValue { ++ return hc.RequestHeader(h) ++} ++ ++// HTTPResponseHeader returns the contents of h as attributes. ++// ++// Instrumentation should require an explicit configuration of which headers to ++// captured and then prune what they pass here. Including all headers can be a ++// security risk - explicit configuration helps avoid leaking sensitive ++// information. ++// ++// The User-Agent header is already captured in the http.user_agent attribute ++// from ClientRequest and ServerRequest. Instrumentation may provide an option ++// to capture that header here even though it is not recommended. Otherwise, ++// instrumentation should filter that out of what is passed. ++func HTTPResponseHeader(h http.Header) []attribute.KeyValue { ++ return hc.ResponseHeader(h) ++} ++ ++// httpConv are the HTTP semantic convention attributes defined for a version ++// of the OpenTelemetry specification. 
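HTTPServerStatus above is documented so that 4xx responses do not mark server spans as errors; a rough sketch of that rule (the actual implementation is not shown in this hunk, so the out-of-range handling and message text here are assumptions):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/codes"
    )

    // serverStatusSketch renders the documented rule: only 5xx (and codes
    // outside 100-599) become span errors; 1xx-4xx leave the status Unset.
    func serverStatusSketch(code int) (codes.Code, string) {
        if code < 100 || code >= 600 {
            return codes.Error, fmt.Sprintf("invalid HTTP status code %d", code)
        }
        if code >= 500 {
            return codes.Error, ""
        }
        return codes.Unset, ""
    }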
++type httpConv struct { ++ NetConv *netConv ++ ++ EnduserIDKey attribute.Key ++ HTTPClientIPKey attribute.Key ++ HTTPFlavorKey attribute.Key ++ HTTPMethodKey attribute.Key ++ HTTPRequestContentLengthKey attribute.Key ++ HTTPResponseContentLengthKey attribute.Key ++ HTTPRouteKey attribute.Key ++ HTTPSchemeHTTP attribute.KeyValue ++ HTTPSchemeHTTPS attribute.KeyValue ++ HTTPStatusCodeKey attribute.Key ++ HTTPTargetKey attribute.Key ++ HTTPURLKey attribute.Key ++ HTTPUserAgentKey attribute.Key ++} ++ ++var hc = &httpConv{ ++ NetConv: nc, ++ ++ EnduserIDKey: semconv.EnduserIDKey, ++ HTTPClientIPKey: semconv.HTTPClientIPKey, ++ HTTPFlavorKey: semconv.HTTPFlavorKey, ++ HTTPMethodKey: semconv.HTTPMethodKey, ++ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, ++ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, ++ HTTPRouteKey: semconv.HTTPRouteKey, ++ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, ++ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, ++ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, ++ HTTPTargetKey: semconv.HTTPTargetKey, ++ HTTPURLKey: semconv.HTTPURLKey, ++ HTTPUserAgentKey: semconv.HTTPUserAgentKey, ++} ++ ++// ClientResponse returns attributes for an HTTP response received by a client ++// from a server. The following attributes are returned if the related values ++// are defined in resp: "http.status.code", "http.response_content_length". ++// ++// This does not add all OpenTelemetry required attributes for an HTTP event, ++// it assumes ClientRequest was used to create the span with a complete set of ++// attributes. If a complete set of attributes can be generated using the ++// request contained in resp. For example: ++// ++// append(ClientResponse(resp), ClientRequest(resp.Request)...) ++func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { ++ var n int ++ if resp.StatusCode > 0 { ++ n++ ++ } ++ if resp.ContentLength > 0 { ++ n++ ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ if resp.StatusCode > 0 { ++ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) ++ } ++ if resp.ContentLength > 0 { ++ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) ++ } ++ return attrs ++} ++ ++// ClientRequest returns attributes for an HTTP request made by a client. The ++// following attributes are always returned: "http.url", "http.flavor", ++// "http.method", "net.peer.name". The following attributes are returned if the ++// related values are defined in req: "net.peer.port", "http.user_agent", ++// "http.request_content_length", "enduser.id". ++func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { ++ n := 3 // URL, peer name, proto, and method. ++ var h string ++ if req.URL != nil { ++ h = req.URL.Host ++ } ++ peer, p := firstHostPort(h, req.Header.Get("Host")) ++ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) ++ if port > 0 { ++ n++ ++ } ++ useragent := req.UserAgent() ++ if useragent != "" { ++ n++ ++ } ++ if req.ContentLength > 0 { ++ n++ ++ } ++ userID, _, hasUserID := req.BasicAuth() ++ if hasUserID { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.method(req.Method)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ ++ var u string ++ if req.URL != nil { ++ // Remove any username/password info that may be in the URL. ++ userinfo := req.URL.User ++ req.URL.User = nil ++ u = req.URL.String() ++ // Restore any username/password info that was removed. 
++ req.URL.User = userinfo ++ } ++ attrs = append(attrs, c.HTTPURLKey.String(u)) ++ ++ attrs = append(attrs, c.NetConv.PeerName(peer)) ++ if port > 0 { ++ attrs = append(attrs, c.NetConv.PeerPort(port)) ++ } ++ ++ if useragent != "" { ++ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) ++ } ++ ++ if l := req.ContentLength; l > 0 { ++ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) ++ } ++ ++ if hasUserID { ++ attrs = append(attrs, c.EnduserIDKey.String(userID)) ++ } ++ ++ return attrs ++} ++ ++// ServerRequest returns attributes for an HTTP request received by a server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "http.target", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port", ++// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", ++// "http.client_ip". ++func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { ++ // TODO: This currently does not add the specification required ++ // `http.target` attribute. It has too high of a cardinality to safely be ++ // added. An alternate should be added, or this comment removed, when it is ++ // addressed by the specification. If it is ultimately decided to continue ++ // not including the attribute, the HTTPTargetKey field of the httpConv ++ // should be removed as well. ++ ++ n := 4 // Method, scheme, proto, and host name. ++ var host string ++ var p int ++ if server == "" { ++ host, p = splitHostPort(req.Host) ++ } else { ++ // Prioritize the primary server name. ++ host, p = splitHostPort(server) ++ if p < 0 { ++ _, p = splitHostPort(req.Host) ++ } ++ } ++ hostPort := requiredHTTPPort(req.TLS != nil, p) ++ if hostPort > 0 { ++ n++ ++ } ++ peer, peerPort := splitHostPort(req.RemoteAddr) ++ if peer != "" { ++ n++ ++ if peerPort > 0 { ++ n++ ++ } ++ } ++ useragent := req.UserAgent() ++ if useragent != "" { ++ n++ ++ } ++ userID, _, hasUserID := req.BasicAuth() ++ if hasUserID { ++ n++ ++ } ++ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) ++ if clientIP != "" { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.method(req.Method)) ++ attrs = append(attrs, c.scheme(req.TLS != nil)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ attrs = append(attrs, c.NetConv.HostName(host)) ++ ++ if hostPort > 0 { ++ attrs = append(attrs, c.NetConv.HostPort(hostPort)) ++ } ++ ++ if peer != "" { ++ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a ++ // file-path that would be interpreted with a sock family. 
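++		// For illustration (hypothetical value): a RemoteAddr of "10.1.2.3:53422"
++		// is split above into peer "10.1.2.3" and peerPort 53422, which become
++		// the net.sock.peer.addr and net.sock.peer.port attributes appended here.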
++ attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) ++ if peerPort > 0 { ++ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) ++ } ++ } ++ ++ if useragent != "" { ++ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) ++ } ++ ++ if hasUserID { ++ attrs = append(attrs, c.EnduserIDKey.String(userID)) ++ } ++ ++ if clientIP != "" { ++ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) ++ } ++ ++ return attrs ++} ++ ++// ServerRequestMetrics returns metric attributes for an HTTP request received ++// by a server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port". ++func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { ++ // TODO: This currently does not add the specification required ++ // `http.target` attribute. It has too high of a cardinality to safely be ++ // added. An alternate should be added, or this comment removed, when it is ++ // addressed by the specification. If it is ultimately decided to continue ++ // not including the attribute, the HTTPTargetKey field of the httpConv ++ // should be removed as well. ++ ++ n := 4 // Method, scheme, proto, and host name. ++ var host string ++ var p int ++ if server == "" { ++ host, p = splitHostPort(req.Host) ++ } else { ++ // Prioritize the primary server name. 
++ host, p = splitHostPort(server) ++ if p < 0 { ++ _, p = splitHostPort(req.Host) ++ } ++ } ++ hostPort := requiredHTTPPort(req.TLS != nil, p) ++ if hostPort > 0 { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.methodMetric(req.Method)) ++ attrs = append(attrs, c.scheme(req.TLS != nil)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ attrs = append(attrs, c.NetConv.HostName(host)) ++ ++ if hostPort > 0 { ++ attrs = append(attrs, c.NetConv.HostPort(hostPort)) ++ } ++ ++ return attrs ++} ++ ++func (c *httpConv) method(method string) attribute.KeyValue { ++ if method == "" { ++ return c.HTTPMethodKey.String(http.MethodGet) ++ } ++ return c.HTTPMethodKey.String(method) ++} ++ ++func (c *httpConv) methodMetric(method string) attribute.KeyValue { ++ method = strings.ToUpper(method) ++ switch method { ++ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: ++ default: ++ method = "_OTHER" ++ } ++ return c.HTTPMethodKey.String(method) ++} ++ ++func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive ++ if https { ++ return c.HTTPSchemeHTTPS ++ } ++ return c.HTTPSchemeHTTP ++} ++ ++func (c *httpConv) flavor(proto string) attribute.KeyValue { ++ switch proto { ++ case "HTTP/1.0": ++ return c.HTTPFlavorKey.String("1.0") ++ case "HTTP/1.1": ++ return c.HTTPFlavorKey.String("1.1") ++ case "HTTP/2": ++ return c.HTTPFlavorKey.String("2.0") ++ case "HTTP/3": ++ return c.HTTPFlavorKey.String("3.0") ++ default: ++ return c.HTTPFlavorKey.String(proto) ++ } ++} ++ ++func serverClientIP(xForwardedFor string) string { ++ if idx := strings.Index(xForwardedFor, ","); idx >= 0 { ++ xForwardedFor = xForwardedFor[:idx] ++ } ++ return xForwardedFor ++} ++ ++func requiredHTTPPort(https bool, port int) int { // nolint:revive ++ if https { ++ if port > 0 && port != 443 { ++ return port ++ } ++ } else { ++ if port > 0 && port != 80 { ++ return port ++ } ++ } ++ return -1 ++} ++ ++// Return the request host and port from the first non-empty source. ++func firstHostPort(source ...string) (host string, port int) { ++ for _, hostport := range source { ++ host, port = splitHostPort(hostport) ++ if host != "" || port > 0 { ++ break ++ } ++ } ++ return ++} ++ ++// RequestHeader returns the contents of h as OpenTelemetry attributes. ++func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { ++ return c.header("http.request.header", h) ++} ++ ++// ResponseHeader returns the contents of h as OpenTelemetry attributes. ++func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { ++ return c.header("http.response.header", h) ++} ++ ++func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { ++ key := func(k string) attribute.Key { ++ k = strings.ToLower(k) ++ k = strings.ReplaceAll(k, "-", "_") ++ k = fmt.Sprintf("%s.%s", prefix, k) ++ return attribute.Key(k) ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, len(h)) ++ for k, v := range h { ++ attrs = append(attrs, key(k).StringSlice(v)) ++ } ++ return attrs ++} ++ ++// ClientStatus returns a span status code and message for an HTTP status code ++// value received by a client. 
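++//
++// For illustration, the mapping implemented below works out to:
++//
++//	ClientStatus(200) // codes.Unset, ""
++//	ClientStatus(404) // codes.Error, "" (4xx is an error for clients)
++//	ClientStatus(999) // codes.Error, "Invalid HTTP status code 999"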
++func (c *httpConv) ClientStatus(code int) (codes.Code, string) { ++ if code < 100 || code >= 600 { ++ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) ++ } ++ if code >= 400 { ++ return codes.Error, "" ++ } ++ return codes.Unset, "" ++} ++ ++// ServerStatus returns a span status code and message for an HTTP status code ++// value returned by a server. Status codes in the 400-499 range are not ++// returned as errors. ++func (c *httpConv) ServerStatus(code int) (codes.Code, string) { ++ if code < 100 || code >= 600 { ++ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) ++ } ++ if code >= 500 { ++ return codes.Error, "" ++ } ++ return codes.Unset, "" ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +new file mode 100644 +index 00000000000..bde8893437d +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +@@ -0,0 +1,368 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/semconvutil/netconv.go.tmpl ++ ++// Copyright The OpenTelemetry Authors ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++import ( ++ "net" ++ "strconv" ++ "strings" ++ ++ "go.opentelemetry.io/otel/attribute" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++) ++ ++// NetTransport returns a trace attribute describing the transport protocol of the ++// passed network. See the net.Dial for information about acceptable network ++// values. ++func NetTransport(network string) attribute.KeyValue { ++ return nc.Transport(network) ++} ++ ++// NetClient returns trace attributes for a client network connection to address. ++// See net.Dial for information about acceptable address values, address should ++// be the same as the one used to create conn. If conn is nil, only network ++// peer attributes will be returned that describe address. Otherwise, the ++// socket level information about conn will also be included. ++func NetClient(address string, conn net.Conn) []attribute.KeyValue { ++ return nc.Client(address, conn) ++} ++ ++// NetServer returns trace attributes for a network listener listening at address. ++// See net.Listen for information about acceptable address values, address ++// should be the same as the one used to create ln. If ln is nil, only network ++// host attributes will be returned that describe address. Otherwise, the ++// socket level information about ln will also be included. ++func NetServer(address string, ln net.Listener) []attribute.KeyValue { ++ return nc.Server(address, ln) ++} ++ ++// netConv are the network semantic convention attributes defined for a version ++// of the OpenTelemetry specification. 
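++//
++// The package-level Net* helpers above delegate to a shared instance of this
++// type. For illustration (a minimal sketch of the Transport mapping below):
++//
++//	semconvutil.NetTransport("tcp") // nc.NetTransportTCP
++//	semconvutil.NetTransport("ip4") // nc.NetTransportOther ("ip*" networks)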
++type netConv struct { ++ NetHostNameKey attribute.Key ++ NetHostPortKey attribute.Key ++ NetPeerNameKey attribute.Key ++ NetPeerPortKey attribute.Key ++ NetSockFamilyKey attribute.Key ++ NetSockPeerAddrKey attribute.Key ++ NetSockPeerPortKey attribute.Key ++ NetSockHostAddrKey attribute.Key ++ NetSockHostPortKey attribute.Key ++ NetTransportOther attribute.KeyValue ++ NetTransportTCP attribute.KeyValue ++ NetTransportUDP attribute.KeyValue ++ NetTransportInProc attribute.KeyValue ++} ++ ++var nc = &netConv{ ++ NetHostNameKey: semconv.NetHostNameKey, ++ NetHostPortKey: semconv.NetHostPortKey, ++ NetPeerNameKey: semconv.NetPeerNameKey, ++ NetPeerPortKey: semconv.NetPeerPortKey, ++ NetSockFamilyKey: semconv.NetSockFamilyKey, ++ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, ++ NetSockPeerPortKey: semconv.NetSockPeerPortKey, ++ NetSockHostAddrKey: semconv.NetSockHostAddrKey, ++ NetSockHostPortKey: semconv.NetSockHostPortKey, ++ NetTransportOther: semconv.NetTransportOther, ++ NetTransportTCP: semconv.NetTransportTCP, ++ NetTransportUDP: semconv.NetTransportUDP, ++ NetTransportInProc: semconv.NetTransportInProc, ++} ++ ++func (c *netConv) Transport(network string) attribute.KeyValue { ++ switch network { ++ case "tcp", "tcp4", "tcp6": ++ return c.NetTransportTCP ++ case "udp", "udp4", "udp6": ++ return c.NetTransportUDP ++ case "unix", "unixgram", "unixpacket": ++ return c.NetTransportInProc ++ default: ++ // "ip:*", "ip4:*", and "ip6:*" all are considered other. ++ return c.NetTransportOther ++ } ++} ++ ++// Host returns attributes for a network host address. ++func (c *netConv) Host(address string) []attribute.KeyValue { ++ h, p := splitHostPort(address) ++ var n int ++ if h != "" { ++ n++ ++ if p > 0 { ++ n++ ++ } ++ } ++ ++ if n == 0 { ++ return nil ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ attrs = append(attrs, c.HostName(h)) ++ if p > 0 { ++ attrs = append(attrs, c.HostPort(int(p))) ++ } ++ return attrs ++} ++ ++// Server returns attributes for a network listener listening at address. See ++// net.Listen for information about acceptable address values, address should ++// be the same as the one used to create ln. If ln is nil, only network host ++// attributes will be returned that describe address. Otherwise, the socket ++// level information about ln will also be included. ++func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { ++ if ln == nil { ++ return c.Host(address) ++ } ++ ++ lAddr := ln.Addr() ++ if lAddr == nil { ++ return c.Host(address) ++ } ++ ++ hostName, hostPort := splitHostPort(address) ++ sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) ++ network := lAddr.Network() ++ sockFamily := family(network, sockHostAddr) ++ ++ n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) ++ n += positiveInt(hostPort, sockHostPort) ++ attr := make([]attribute.KeyValue, 0, n) ++ if hostName != "" { ++ attr = append(attr, c.HostName(hostName)) ++ if hostPort > 0 { ++ // Only if net.host.name is set should net.host.port be. ++ attr = append(attr, c.HostPort(hostPort)) ++ } ++ } ++ if network != "" { ++ attr = append(attr, c.Transport(network)) ++ } ++ if sockFamily != "" { ++ attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) ++ } ++ if sockHostAddr != "" { ++ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) ++ if sockHostPort > 0 { ++ // Only if net.sock.host.addr is set should net.sock.host.port be. 
++ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) ++ } ++ } ++ return attr ++} ++ ++func (c *netConv) HostName(name string) attribute.KeyValue { ++ return c.NetHostNameKey.String(name) ++} ++ ++func (c *netConv) HostPort(port int) attribute.KeyValue { ++ return c.NetHostPortKey.Int(port) ++} ++ ++// Client returns attributes for a client network connection to address. See ++// net.Dial for information about acceptable address values, address should be ++// the same as the one used to create conn. If conn is nil, only network peer ++// attributes will be returned that describe address. Otherwise, the socket ++// level information about conn will also be included. ++func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { ++ if conn == nil { ++ return c.Peer(address) ++ } ++ ++ lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() ++ ++ var network string ++ switch { ++ case lAddr != nil: ++ network = lAddr.Network() ++ case rAddr != nil: ++ network = rAddr.Network() ++ default: ++ return c.Peer(address) ++ } ++ ++ peerName, peerPort := splitHostPort(address) ++ var ( ++ sockFamily string ++ sockPeerAddr string ++ sockPeerPort int ++ sockHostAddr string ++ sockHostPort int ++ ) ++ ++ if lAddr != nil { ++ sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) ++ } ++ ++ if rAddr != nil { ++ sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) ++ } ++ ++ switch { ++ case sockHostAddr != "": ++ sockFamily = family(network, sockHostAddr) ++ case sockPeerAddr != "": ++ sockFamily = family(network, sockPeerAddr) ++ } ++ ++ n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) ++ n += positiveInt(peerPort, sockPeerPort, sockHostPort) ++ attr := make([]attribute.KeyValue, 0, n) ++ if peerName != "" { ++ attr = append(attr, c.PeerName(peerName)) ++ if peerPort > 0 { ++ // Only if net.peer.name is set should net.peer.port be. ++ attr = append(attr, c.PeerPort(peerPort)) ++ } ++ } ++ if network != "" { ++ attr = append(attr, c.Transport(network)) ++ } ++ if sockFamily != "" { ++ attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) ++ } ++ if sockPeerAddr != "" { ++ attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) ++ if sockPeerPort > 0 { ++ // Only if net.sock.peer.addr is set should net.sock.peer.port be. ++ attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) ++ } ++ } ++ if sockHostAddr != "" { ++ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) ++ if sockHostPort > 0 { ++ // Only if net.sock.host.addr is set should net.sock.host.port be. ++ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) ++ } ++ } ++ return attr ++} ++ ++func family(network, address string) string { ++ switch network { ++ case "unix", "unixgram", "unixpacket": ++ return "unix" ++ default: ++ if ip := net.ParseIP(address); ip != nil { ++ if ip.To4() == nil { ++ return "inet6" ++ } ++ return "inet" ++ } ++ } ++ return "" ++} ++ ++func nonZeroStr(strs ...string) int { ++ var n int ++ for _, str := range strs { ++ if str != "" { ++ n++ ++ } ++ } ++ return n ++} ++ ++func positiveInt(ints ...int) int { ++ var n int ++ for _, i := range ints { ++ if i > 0 { ++ n++ ++ } ++ } ++ return n ++} ++ ++// Peer returns attributes for a network peer address. 
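++//
++// For example (illustrative values):
++//
++//	c.Peer("example.com:8080") // net.peer.name=example.com, net.peer.port=8080
++//	c.Peer("")                 // nil: neither a host nor a port can be parsed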
++func (c *netConv) Peer(address string) []attribute.KeyValue { ++ h, p := splitHostPort(address) ++ var n int ++ if h != "" { ++ n++ ++ if p > 0 { ++ n++ ++ } ++ } ++ ++ if n == 0 { ++ return nil ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ attrs = append(attrs, c.PeerName(h)) ++ if p > 0 { ++ attrs = append(attrs, c.PeerPort(int(p))) ++ } ++ return attrs ++} ++ ++func (c *netConv) PeerName(name string) attribute.KeyValue { ++ return c.NetPeerNameKey.String(name) ++} ++ ++func (c *netConv) PeerPort(port int) attribute.KeyValue { ++ return c.NetPeerPortKey.Int(port) ++} ++ ++func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { ++ return c.NetSockPeerAddrKey.String(addr) ++} ++ ++func (c *netConv) SockPeerPort(port int) attribute.KeyValue { ++ return c.NetSockPeerPortKey.Int(port) ++} ++ ++// splitHostPort splits a network address hostport of the form "host", ++// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", ++// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and ++// port. ++// ++// An empty host is returned if it is not provided or unparsable. A negative ++// port is returned if it is not provided or unparsable. ++func splitHostPort(hostport string) (host string, port int) { ++ port = -1 ++ ++ if strings.HasPrefix(hostport, "[") { ++ addrEnd := strings.LastIndex(hostport, "]") ++ if addrEnd < 0 { ++ // Invalid hostport. ++ return ++ } ++ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { ++ host = hostport[1:addrEnd] ++ return ++ } ++ } else { ++ if i := strings.LastIndex(hostport, ":"); i < 0 { ++ host = hostport ++ return ++ } ++ } ++ ++ host, pStr, err := net.SplitHostPort(hostport) ++ if err != nil { ++ return ++ } ++ ++ p, err := strconv.ParseUint(pStr, 10, 16) ++ if err != nil { ++ return ++ } ++ return host, int(p) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +index fd5e1e9bc75..e835cac12e4 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +@@ -20,10 +20,10 @@ import ( + "net/http" + "net/http/httptrace" + ++ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + "go.opentelemetry.io/otel/trace" + ) + +@@ -109,8 +109,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + +- r = r.WithContext(ctx) +- span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...) ++ r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. ++ span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) +@@ -121,8 +121,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + return res, err + } + +- span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...) +- span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode)) ++ span.SetAttributes(semconvutil.HTTPClientResponse(res)...) 
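++	// For a 4xx or 5xx response, HTTPClientStatus below reports codes.Error,
++	// so the client span is marked as failed.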
++ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + res.Body = newWrappedBody(span, res.Body) + + return res, err +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +index 210ee0b787f..8f3f53a9588 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +@@ -16,11 +16,13 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http + + // Version is the current release version of the otelhttp instrumentation. + func Version() string { +- return "0.35.1" ++ return "0.44.0" + // This string is updated by the pre_release.sh script during release + } + + // SemVersion is the semantic version to be supplied to tracer/meter creation. ++// ++// Deprecated: Use [Version] instead. + func SemVersion() string { +- return "semver:" + Version() ++ return Version() + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +index da6468c4e59..11a35ed167f 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +@@ -50,7 +50,7 @@ func (w *bodyWrapper) Close() error { + var _ http.ResponseWriter = &respWriterWrapper{} + + // respWriterWrapper wraps a http.ResponseWriter in order to track the number of +-// bytes written, the last error, and to catch the returned statusCode ++// bytes written, the last error, and to catch the first written statusCode. + // TODO: The wrapped http.ResponseWriter doesn't implement any of the optional + // types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) + // that may be useful when using it in real life situations. +@@ -85,11 +85,15 @@ func (w *respWriterWrapper) Write(p []byte) (int, error) { + return n, err + } + ++// WriteHeader persists initial statusCode for span attribution. ++// All calls to WriteHeader will be propagated to the underlying ResponseWriter ++// and will persist the statusCode from the first call. ++// Blocking consecutive calls to WriteHeader alters expected behavior and will ++// remove warning logs from net/http where developers will notice incorrect handler implementations. 
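++//
++// For illustration (hypothetical calls against the implementation below):
++//
++//	w.WriteHeader(http.StatusTeapot) // 418 is recorded and forwarded
++//	w.WriteHeader(http.StatusOK)     // forwarded too, but the recorded code stays 418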
+ func (w *respWriterWrapper) WriteHeader(statusCode int) { +- if w.wroteHeader { +- return ++ if !w.wroteHeader { ++ w.wroteHeader = true ++ w.statusCode = statusCode + } +- w.wroteHeader = true +- w.statusCode = statusCode + w.ResponseWriter.WriteHeader(statusCode) + } +diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore +new file mode 100644 +index 00000000000..ae6a3bcf12c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/.codespellignore +@@ -0,0 +1,5 @@ ++ot ++fo ++te ++collison ++consequentially +diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc +new file mode 100644 +index 00000000000..4afbb1fb3bd +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/.codespellrc +@@ -0,0 +1,10 @@ ++# https://github.com/codespell-project/codespell ++[codespell] ++builtin = clear,rare,informal ++check-filenames = ++check-hidden = ++ignore-words = .codespellignore ++interactive = 1 ++skip = .git,go.mod,go.sum,semconv,venv,.tools ++uri-ignore-words-list = * ++write = +diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore +index 0b605b3d67d..9248055655b 100644 +--- a/vendor/go.opentelemetry.io/otel/.gitignore ++++ b/vendor/go.opentelemetry.io/otel/.gitignore +@@ -2,20 +2,25 @@ + Thumbs.db + + .tools/ ++venv/ + .idea/ + .vscode/ + *.iml + *.so + coverage.* ++go.work ++go.work.sum + + gen/ + ++/example/dice/dice + /example/fib/fib + /example/fib/traces.txt + /example/jaeger/jaeger + /example/namedtracer/namedtracer ++/example/otel-collector/otel-collector + /example/opencensus/opencensus + /example/passthrough/passthrough + /example/prometheus/prometheus ++/example/view/view + /example/zipkin/zipkin +-/example/otel-collector/otel-collector +diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml +index 253e3b35b52..a62511f382e 100644 +--- a/vendor/go.opentelemetry.io/otel/.golangci.yml ++++ b/vendor/go.opentelemetry.io/otel/.golangci.yml +@@ -9,22 +9,20 @@ linters: + disable-all: true + # Specifically enable linters we want to use. + enable: +- - deadcode + - depguard + - errcheck + - godot +- - gofmt ++ - gofumpt + - goimports ++ - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck +- - structcheck + - typecheck + - unused +- - varcheck + + issues: + # Maximum issues count per one linter. +@@ -56,6 +54,20 @@ issues: + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive ++ # It's okay to not run gosec in a test. ++ - path: _test\.go ++ linters: ++ - gosec ++ # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) ++ # as we commonly use it in tests and examples. ++ - text: "G404:" ++ linters: ++ - gosec ++ # Igonoring gosec G402: TLS MinVersion too low ++ # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. ++ - text: "G402: TLS MinVersion too low." ++ linters: ++ - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 +@@ -64,30 +76,67 @@ issues: + + linters-settings: + depguard: +- # Check the list against standard lib. +- # Default: false +- include-go-root: true +- # A list of packages for the list type specified. +- # Default: [] +- packages: +- - "crypto/md5" +- - "crypto/sha1" +- - "crypto/**/pkix" +- ignore-file-rules: +- - "**/*_test.go" +- additional-guards: +- # Do not allow testing packages in non-test files. 
+- - list-type: denylist +- include-go-root: true +- packages: +- - testing +- - github.com/stretchr/testify +- ignore-file-rules: +- - "**/*_test.go" +- - "**/*test/*.go" +- - "**/internal/matchers/*.go" ++ rules: ++ non-tests: ++ files: ++ - "!$test" ++ - "!**/*test/*.go" ++ - "!**/internal/matchers/*.go" ++ deny: ++ - pkg: "testing" ++ - pkg: "github.com/stretchr/testify" ++ - pkg: "crypto/md5" ++ - pkg: "crypto/sha1" ++ - pkg: "crypto/**/pkix" ++ otlp-internal: ++ files: ++ - "!**/exporters/otlp/internal/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" ++ desc: Do not use cross-module internal packages. ++ otlptrace-internal: ++ files: ++ - "!**/exporters/otlp/otlptrace/*.go" ++ - "!**/exporters/otlp/otlptrace/internal/**.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" ++ desc: Do not use cross-module internal packages. ++ otlpmetric-internal: ++ files: ++ - "!**/exporters/otlp/otlpmetric/internal/*.go" ++ - "!**/exporters/otlp/otlpmetric/internal/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" ++ desc: Do not use cross-module internal packages. ++ otel-internal: ++ files: ++ - "**/sdk/*.go" ++ - "**/sdk/**/*.go" ++ - "**/exporters/*.go" ++ - "**/exporters/**/*.go" ++ - "**/schema/*.go" ++ - "**/schema/**/*.go" ++ - "**/metric/*.go" ++ - "**/metric/**/*.go" ++ - "**/bridge/*.go" ++ - "**/bridge/**/*.go" ++ - "**/example/*.go" ++ - "**/example/**/*.go" ++ - "**/trace/*.go" ++ - "**/trace/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/internal$" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/attribute" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/internaltest" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/matchers" ++ desc: Do not use cross-module internal packages. + godot: + exclude: ++ # Exclude links. ++ - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. +@@ -114,8 +163,9 @@ linters-settings: + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument ++ # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument +- disabled: false ++ disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type +diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore +index 545d634525d..40d62fa2eb8 100644 +--- a/vendor/go.opentelemetry.io/otel/.lycheeignore ++++ b/vendor/go.opentelemetry.io/otel/.lycheeignore +@@ -1,3 +1,6 @@ + http://localhost + http://jaeger-collector + https://github.com/open-telemetry/opentelemetry-go/milestone/ ++https://github.com/open-telemetry/opentelemetry-go/projects ++file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries ++file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md +index 906e17ce94f..c4e7ad475f5 100644 +--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md ++++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md +@@ -8,6 +8,820 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm + + ## [Unreleased] + ++## [1.20.0/0.43.0] 2023-11-10 ++ ++This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ++ ++### Added ++ ++- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) ++- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) ++- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) ++- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) ++- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) ++- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) ++- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) ++- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) ++- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) ++- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) ++- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) ++- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) ++- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) ++ ++### Deprecated ++ ++- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) ++- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) ++- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. 
++ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) ++- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) ++- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) ++ ++### Changed ++ ++- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) ++- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. ++ This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. ++ This extends the `Tracer` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. ++ This extends the `Span` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) ++- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) ++- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) ++- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) ++- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) ++- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) ++- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) ++- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) ++ ++### Fixed ++ ++- Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. 
(#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699) ++- In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) ++- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) ++- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) ++ ++## [1.19.0/0.42.0/0.0.7] 2023-09-28 ++ ++This release contains the first stable release of the OpenTelemetry Go [metric SDK]. ++Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) ++- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) ++ ++### Changed ++ ++- Allow '/' characters in metric instrument names. (#4501) ++- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) ++- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) ++ ++### Fixed ++ ++- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ++ ++### Removed ++ ++- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) ++ ++## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 ++ ++This is a release candidate for the v1.19.0/v0.42.0 release. ++That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Changed ++ ++- Allow '/' characters in metric instrument names. (#4501) ++ ++### Fixed ++ ++- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ++ ++## [1.18.0/0.41.0/0.0.6] 2023-09-12 ++ ++This release drops the compatibility guarantee of [Go 1.19]. 
++ ++### Added ++ ++- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) ++- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) ++ ++### Changed ++ ++- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) ++ ++### Deprecated ++ ++- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). ++ The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) ++ ++### Removed ++ ++- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) ++- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) ++- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) ++- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) ++- Dropped guaranteed support for versions of Go less than 1.20. (#4481) ++ ++## [1.17.0/0.40.0/0.0.5] 2023-08-28 ++ ++### Added ++ ++- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) ++- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) ++- Add support for exponential histogram aggregations. ++ A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) ++- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) ++- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) ++- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) ++- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) ++- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) ++- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. ++ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) ++- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) ++- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381) ++- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374) ++- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) ++- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. 
(#4437) ++- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) ++- Support Go 1.21. (#4463) ++ ++### Changed ++ ++- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) ++- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) ++- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) ++- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) ++- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) ++- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) ++- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) ++- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) ++- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) ++- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) ++- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) ++- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) ++- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) ++- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) ++- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) ++- Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346) ++ ++### Removed ++ ++- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`. ++ Use the added `WithProducer` option instead. (#4346) ++- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`. ++ Notice that `PeriodicReader.ForceFlush` is still available. (#4375) ++ ++### Fixed ++ ++- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) ++- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) ++- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317) ++- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. 
(#4337) ++- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) ++- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) ++- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) ++- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349) ++- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) ++- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846) ++- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395) ++- Do not append `_total` if the counter already has that suffix for the Prometheus exproter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373) ++- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) ++- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. ++ OpenTelemetry dropped support for Jaeger exporter in July 2023. ++ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` ++ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) ++- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. 
(#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. ++ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) ++ ++## [1.16.0/0.39.0] 2023-05-18 ++ ++This release contains the first stable release of the OpenTelemetry Go [metric API]. ++Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. ++ The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) ++- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. ++ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) ++- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) ++- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) ++- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) ++ ++### Changed ++ ++- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) ++- `MeterProvider` returns noop meters once it has been shutdown. (#4154) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. ++ Use `go.opentelemetry.io/otel/metric` instead. (#4055) ++ ++### Fixed ++ ++- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) ++ ++## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 ++ ++This is a release candidate for the v1.16.0/v0.39.0 release. ++That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) ++ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. ++ - Use `GetMeterProivder` for a global `metric.MeterProvider`. ++ - Use `SetMeterProivder` to set the global `metric.MeterProvider`. ++ ++### Changed ++ ++- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. ++ This stages the metric API to be released as a stable module. (#4038) ++ ++### Removed ++ ++- The `go.opentelemetry.io/otel/metric/global` package is removed. ++ Use `go.opentelemetry.io/otel` instead. (#4039) ++ ++## [1.15.1/0.38.1] 2023-05-02 ++ ++### Fixed ++ ++- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. 
(#4040, #4041) ++ ++## [1.15.0/0.38.0] 2023-04-27 ++ ++### Added ++ ++- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) ++- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) ++- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) ++- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) ++ - The `AddConfig` used to hold configuration for addition measurements ++ - `NewAddConfig` used to create a new `AddConfig` ++ - `AddOption` used to configure an `AddConfig` ++ - The `RecordConfig` used to hold configuration for recorded measurements ++ - `NewRecordConfig` used to create a new `RecordConfig` ++ - `RecordOption` used to configure a `RecordConfig` ++ - The `ObserveConfig` used to hold configuration for observed measurements ++ - `NewObserveConfig` used to create a new `ObserveConfig` ++ - `ObserveOption` used to configure an `ObserveConfig` ++- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. ++ They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) ++- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) ++- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) ++ ++### Changed ++ ++- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) ++- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. ++ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) ++- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) ++ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` ++- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) ++- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) ++- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) ++ - The `Int64Counter.Add` method now accepts `...AddOption` ++ - The `Float64Counter.Add` method now accepts `...AddOption` ++ - The `Int64UpDownCounter.Add` method now accepts `...AddOption` ++ - The `Float64UpDownCounter.Add` method now accepts `...AddOption` ++ - The `Int64Histogram.Record` method now accepts `...RecordOption` ++ - The `Float64Histogram.Record` method now accepts `...RecordOption` ++ - The `Int64Observer.Observe` method now accepts `...ObserveOption` ++ - The `Float64Observer.Observe` method now accepts `...ObserveOption` ++- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. 
(#3971)
++ - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
++ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
++- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
++
++### Fixed
++
++- `TracerProvider` allows calling `Tracer()` while it's shutting down.
++ It used to deadlock. (#3924)
++- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
++- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
++- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
++
++### Deprecated
++
++- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
++ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
++
++## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
++
++This is a release candidate for the v1.15.0/v0.38.0 release.
++That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
++
++### Added
++
++- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
++- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
++- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
++ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
++- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
++- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
++- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
++
++### Changed
++
++- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
++- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
++- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
++- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
++- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
++- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
++- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
++- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
++- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
++- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
++
++### Fixed
++
++- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown.
(#3845)
++
++### Removed
++
++- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
++- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
++- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
++ Use the added `float64` instrument configuration instead. (#3895)
++- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
++ Use the added `int64` instrument configuration instead. (#3895)
++- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
++
++## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
++
++This is a release candidate for the v1.15.0/v0.38.0 release.
++That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
++
++This release drops the compatibility guarantee of [Go 1.18].
++
++### Added
++
++- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
++ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
++ - Use `GetMeterProivder` for a global `metric.MeterProvider`.
++ - Use `SetMeterProivder` to set the global `metric.MeterProvider`.
++
++### Changed
++
++- Dropped compatibility testing for [Go 1.18].
++ The project no longer guarantees support for this version of Go. (#3813)
++
++### Fixed
++
++- Handle empty environment variables as if they were not set. (#3764)
++- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
++- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
++- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
++
++### Deprecated
++
++- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
++ Use `go.opentelemetry.io/otel` instead. (#3818)
++
++### Removed
++
++- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
++
++## [1.14.0/0.37.0/0.0.4] 2023-02-27
++
++This release is the last to support [Go 1.18].
++The next release will require at least [Go 1.19].
++
++### Added
++
++- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
++- Support [Go 1.20]. (#3693)
++- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
++ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification.
(#3719)
++ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
++ - `OtelScopeNameKey` -> `OTelScopeNameKey`
++ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
++ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
++ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
++ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
++ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
++ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
++ - `OtelStatusCodeError` -> `OTelStatusCodeError`
++ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
++ - `OtelScopeName` -> `OTelScopeName`
++ - `OtelScopeVersion` -> `OTelScopeVersion`
++ - `OtelLibraryName` -> `OTelLibraryName`
++ - `OtelLibraryVersion` -> `OTelLibraryVersion`
++ - `OtelStatusDescription` -> `OTelStatusDescription`
++- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
++ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
++- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
++- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
++- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
++ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
++ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout for an export attempt.
++
++### Changed
++
++- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
++- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
++ This change is made to enable memory reuse by SDK users. (#3732)
++- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
++
++### Fixed
++
++- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
++- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
++- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
++- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
++- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
++- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
++- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
++
++### Deprecated
++
++- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
++ Use the equivalent unit string instead. (#3776)
++ - Use `"1"` instead of `unit.Dimensionless`
++ - Use `"By"` instead of `unit.Bytes`
++ - Use `"ms"` instead of `unit.Milliseconds`
++
++## [1.13.0/0.36.0] 2023-02-07
++
++### Added
++
++- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
++ These functions ensure semantic convention type correctness. (#3675)
++
++### Fixed
++
++- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages.
(#3687)
++ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
++ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
++ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
++ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
++ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
++
++### Removed
++
++- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
++- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
++- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
++- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
++
++## [1.12.0/0.35.0] 2023-01-28
++
++### Added
++
++- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
++ This option is used to configure `int64` Observer callbacks during their creation. (#3507)
++- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
++ This option is used to configure `float64` Observer callbacks during their creation. (#3507)
++- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
++ These additions are used to enable external metric Producers. (#3524)
++- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
++ This new named function type is registered with a `Meter`. (#3564)
++- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
++ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
++ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
++ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
++ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
++- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
++ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
++- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
++ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
++- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
++ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
++- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
++ These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
++ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
++ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
++ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
++ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
++ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
++ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
++ - `Float64Counter` replaces the `syncfloat64.Counter`
++ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
++ - `Float64Histogram` replaces the `syncfloat64.Histogram`
++ - `Int64Counter` replaces the `syncint64.Counter`
++ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
++ - `Int64Histogram` replaces the `syncint64.Histogram`
++- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
++ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
++- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
++ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
++- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
++ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
++
++### Changed
++
++- Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
++- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
++ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
++ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
++ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
++ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
++- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
++ This `Registration` can be used to unregister callbacks. (#3522)
++- Global error handler uses an atomic value instead of a mutex. (#3543)
++- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
++- Global logger uses an atomic value instead of a mutex.
(#3545)
++- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
++- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
++ This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
++- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
++ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
++- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
++- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
++ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
++ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
++ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
++ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
++ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
++ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
++- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
++ - The named `Callback` replaces the inline function parameter. (#3564)
++ - `Callback` is required to return an error. (#3576)
++ - `Callback` accepts the added `Observer` parameter.
++ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
++ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
++- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
++ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
++ Instead it uses the `net.sock.peer` attributes. (#3581)
++- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
++
++### Fixed
++
++- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
++- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
++ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
++
++### Deprecated
++
++- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
++ Use `NewMetricProducer` instead. (#3541)
++- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
++- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead.
(#3575) ++- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) ++- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) ++- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. ++ Use `NewTracerProvider` instead. (#3116) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Int64ObservableCounter` ++ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` ++ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Float64ObservableCounter` ++ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` ++ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Int64Counter` ++ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` ++ - The `Histogram` method is replaced by `Meter.Int64Histogram` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Float64Counter` ++ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` ++ - The `Histogram` method is replaced by `Meter.Float64Histogram` ++ ++## [1.11.2/0.34.0] 2022-12-05 ++ ++### Added ++ ++- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. ++ This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) ++- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. ++ This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) ++- OTLP exporters now recognize: (#3363) ++ - `OTEL_EXPORTER_OTLP_INSECURE` ++ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` ++ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` ++ - `OTEL_EXPORTER_OTLP_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` ++ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` ++ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` ++- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. ++ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. 
(#3459)
++- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
++ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
++- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
++- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
++
++### Changed
++
++- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
++ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
++ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
++- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
++- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
++- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
++- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
++
++### Fixed
++
++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
++- Remove comparable requirement for `Reader`s. (#3387)
++- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
++- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
++- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
++- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
++- Re-enabled Attribute Filters in the Metric SDK. (#3396)
++- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
++- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
++- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
++- Prevent duplicate Prometheus description, unit, and type. (#3469)
++- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
++
++### Removed
++
++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
++
++### Deprecated
++
++- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
++ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead.
(#3476) ++ ++## [1.11.1/0.33.0] 2022-10-19 ++ ++### Added ++ ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. ++ By default, it will register with the default Prometheus registerer. ++ A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) ++- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) ++ ++### Changed ++ ++- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. ++ It will return an error if the exporter fails to register with Prometheus. (#3239) ++ ++### Fixed ++ ++- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) ++- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. ++ This fixes the implementation to be compliant with the W3C specification. (#3226) ++- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) ++- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) ++- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) ++- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) ++- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. ++ Instead the exporter is defined as an "unchecked" collector for Prometheus. ++ This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) ++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) ++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. ++ This can be disabled using the `WithoutUnits()` option added to that package. (#3352) ++ ++## [1.11.0/0.32.3] 2022-10-12 ++ ++### Added ++ ++- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) ++ ++### Changed ++ ++- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) ++- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. ++ This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) ++ ++## [0.32.2] Metric SDK (Alpha) - 2022-10-11 ++ ++### Added ++ ++- Added an example of using metric views to customize instruments. 
(#3177) ++- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) ++ ++### Changed ++ ++- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) ++- Update histogram default bounds to match the requirements of the latest specification. (#3222) ++- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) ++ ++### Fixed ++ ++- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) ++- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) ++- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) ++- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) ++- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) ++ ++## [0.32.1] Metric SDK (Alpha) - 2022-09-22 ++ ++### Changed ++ ++- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. ++ Invalid characters are replaced with `_`. (#3212) ++ ++### Added ++ ++- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) ++- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) ++ ++### Fixed ++ ++- Updated go.mods to point to valid versions of the sdk. (#3216) ++- Set the `MeterProvider` resource on all exported metric data. (#3218) ++ ++## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 ++ ++### Changed ++ ++- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. ++ Please see the package documentation for how the new SDK is initialized and configured. (#3175) ++- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) ++ ++### Removed ++ ++- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. ++ A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. 
(#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. ++ A replacement package that supports the new metric SDK will be added back in a future release. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) ++- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) ++ + ## [1.10.0] - 2022-09-09 + + ### Added +@@ -191,7 +1005,7 @@ Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be mod + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` +- ++ + If the provided environment variables are invalid (negative), the default values would be used. + - Rename the `gc` runtime name to `go` (#2560) + - Add resource container ID detection. (#2418) +@@ -452,7 +1266,7 @@ This release includes an API and SDK for the tracing signal that will comply wit + - Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) + - The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) + - Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) +-- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) ++- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) + - Fixed typos in resources.go. (#2201) + + ## [1.0.0-RC2] - 2021-07-26 +@@ -898,7 +1712,7 @@ with major version 0. + - `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) + - Added documentation about the project's versioning policy. (#1388) + - Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. 
(#1418) +-- Added codeql worfklow to GitHub Actions (#1428) ++- Added codeql workflow to GitHub Actions (#1428) + - Added Gosec workflow to GitHub Actions (#1429) + - Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) + - Add an OpenCensus exporter bridge. (#1444) +@@ -1741,7 +2555,7 @@ There is still a possibility of breaking changes. + + ### Fixed + +-- Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428) ++- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + + ## [0.2.1] - 2020-01-08 + +@@ -1907,7 +2721,27 @@ It contains api and sdk for trace and meter. + - CircleCI build CI manifest files. + - CODEOWNERS file to track owners of this project. + +-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD ++[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD ++[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 ++[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 ++[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 ++[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 ++[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 ++[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 ++[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 ++[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 ++[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 ++[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 ++[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 ++[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 ++[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 ++[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 ++[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 ++[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 ++[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 ++[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 ++[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 ++[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 + [1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 + [1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 + [1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +@@ -1959,3 +2793,11 @@ It contains api and sdk for trace and meter. 
+ [0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 + [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 + [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 ++ ++[Go 1.20]: https://go.dev/doc/go1.20 ++[Go 1.19]: https://go.dev/doc/go1.19 ++[Go 1.18]: https://go.dev/doc/go1.18 ++ ++[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric ++[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric ++[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace +diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS +index c4012ed6ca1..623740007d4 100644 +--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS ++++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS +@@ -12,6 +12,6 @@ + # https://help.github.com/en/articles/about-code-owners + # + +-* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu ++* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +-CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod ++CODEOWNERS @MrAlias @MadVikingGod @pellared +\ No newline at end of file +diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +index 9371a481ab1..a00dbca7b08 100644 +--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md ++++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +@@ -6,7 +6,7 @@ OpenTelemetry + repo for information on this and other language SIGs. + + See the [public meeting +-notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) ++notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) + for a summary description of past meetings. To request edit access, + join the meeting or get in touch on + [Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). +@@ -28,6 +28,11 @@ precommit` - the `precommit` target is the default). + The `precommit` target also fixes the formatting of the code and + checks the status of the go module files. + ++Additionally, there is a `codespell` target that checks for common ++typos in the code. It is not run by default, but you can run it ++manually with `make codespell`. It will set up a virtual environment ++in `venv` and install `codespell` there. ++ + If after running `make precommit` the output of `git status` contains + `nothing to commit, working tree clean` then it means that everything + is up-to-date and properly formatted. +@@ -94,38 +99,66 @@ request ID to the entry you added to `CHANGELOG.md`. + + ### How to Get PRs Merged + +-A PR is considered to be **ready to merge** when: +- +-* It has received two approvals from Collaborators/Maintainers (at +- different companies). This is not enforced through technical means +- and a PR may be **ready to merge** with a single approval if the change +- and its approach have been discussed and consensus reached. +-* Feedback has been addressed. +-* Any substantive changes to your PR will require that you clear any prior +- Approval reviews, this includes changes resulting from other feedback. Unless +- the approver explicitly stated that their approval will persist across +- changes it should be assumed that the PR needs their review again. Other +- project members (e.g. approvers, maintainers) can help with this if there are +- any questions or if you forget to clear reviews. 
+-* It has been open for review for at least one working day. This gives +- people reasonable time to review. +-* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for +- one day and may be merged with a single Maintainer's approval. +-* `CHANGELOG.md` has been updated to reflect what has been +- added, changed, removed, or fixed. +-* `README.md` has been updated if necessary. +-* Urgent fix can take exception as long as it has been actively +- communicated. +- +-Any Maintainer can merge the PR once it is **ready to merge**. ++A PR is considered **ready to merge** when: ++ ++* It has received two qualified approvals[^1]. ++ ++ This is not enforced through automation, but needs to be validated by the ++ maintainer merging. ++ * The qualified approvals need to be from [Approver]s/[Maintainer]s ++ affiliated with different companies. Two qualified approvals from ++ [Approver]s or [Maintainer]s affiliated with the same company counts as a ++ single qualified approval. ++ * PRs introducing changes that have already been discussed and consensus ++ reached only need one qualified approval. The discussion and resolution ++ needs to be linked to the PR. ++ * Trivial changes[^2] only need one qualified approval. ++ ++* All feedback has been addressed. ++ * All PR comments and suggestions are resolved. ++ * All GitHub Pull Request reviews with a status of "Request changes" have ++ been addressed. Another review by the objecting reviewer with a different ++ status can be submitted to clear the original review, or the review can be ++ dismissed by a [Maintainer] when the issues from the original review have ++ been addressed. ++ * Any comments or reviews that cannot be resolved between the PR author and ++ reviewers can be submitted to the community [Approver]s and [Maintainer]s ++ during the weekly SIG meeting. If consensus is reached among the ++ [Approver]s and [Maintainer]s during the SIG meeting the objections to the ++ PR may be dismissed or resolved or the PR closed by a [Maintainer]. ++ * Any substantive changes to the PR require existing Approval reviews be ++ cleared unless the approver explicitly states that their approval persists ++ across changes. This includes changes resulting from other feedback. ++ [Approver]s and [Maintainer]s can help in clearing reviews and they should ++ be consulted if there are any questions. ++ ++* The PR branch is up to date with the base branch it is merging into. ++ * To ensure this does not block the PR, it should be configured to allow ++ maintainers to update it. ++ ++* It has been open for review for at least one working day. This gives people ++ reasonable time to review. ++ * Trivial changes[^2] do not have to wait for one day and may be merged with ++ a single [Maintainer]'s approval. ++ ++* All required GitHub workflows have succeeded. ++* Urgent fix can take exception as long as it has been actively communicated ++ among [Maintainer]s. ++ ++Any [Maintainer] can merge the PR once the above criteria have been met. ++ ++[^1]: A qualified approval is a GitHub Pull Request review with "Approve" ++ status from an OpenTelemetry Go [Approver] or [Maintainer]. ++[^2]: Trivial changes include: typo corrections, cosmetic non-substantive ++ changes, documentation corrections or updates, dependency updates, etc. + + ## Design Choices + + As with other OpenTelemetry clients, opentelemetry-go follows the +-[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). 
++[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). + + It's especially valuable to read through the [library +-guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). ++guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). + + ### Focus on Capabilities, Not Structure Compliance + +@@ -146,23 +179,23 @@ For a deeper discussion, see + + ## Documentation + +-Each non-example Go Module should have its own `README.md` containing: ++Each (non-internal, non-test) package must be documented using ++[Go Doc Comments](https://go.dev/doc/comment), ++preferably in a `doc.go` file. ++ ++Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) ++instead of putting code snippets in Go doc comments. ++In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). + +-- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +-- Brief description. +-- Installation instructions (and requirements if applicable). +-- Hyperlink to an example. Depending on the component the example can be: +- - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). +- - A sample Go application with its own `README.md`, like [here](example/zipkin). +-- Additional documentation sections such us: +- - Configuration, +- - Contributing, +- - References. ++You can install and run a "local Go Doc site" in the following way: + +-[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. ++ ```sh ++ go install golang.org/x/pkgsite/cmd/pkgsite@latest ++ pkgsite ++ ``` + +-Moreover, it should be possible to navigate to any `README.md` from the +-root `README.md`. ++[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) ++is an example of a very well-documented package. + + ## Style Guide + +@@ -216,7 +249,7 @@ Meaning a `config` from one package should not be directly used by another. The + one exception is the API packages. The configs from the base API, eg. + `go.opentelemetry.io/otel/trace.TracerConfig` and + `go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +-by the SDK therefor it is expected that these are exported. ++by the SDK therefore it is expected that these are exported. + + When a config is exported we want to maintain forward and backward + compatibility, to achieve this no fields should be exported but should +@@ -234,12 +267,12 @@ func newConfig(options ...Option) config { + for _, option := range options { + config = option.apply(config) + } +- // Preform any validation here. ++ // Perform any validation here. + return config + } + ``` + +-If validation of the `config` options is also preformed this can return an ++If validation of the `config` options is also performed this can return an + error as well that is expected to be handled by the instantiation function + or propagated to the user. + +@@ -438,12 +471,37 @@ their parameters appropriately named. + #### Interface Stability + + All exported stable interfaces that include the following warning in their +-doumentation are allowed to be extended with additional methods. ++documentation are allowed to be extended with additional methods. + + > Warning: methods may be added to this interface in minor releases. + ++These interfaces are defined by the OpenTelemetry specification and will be ++updated as the specification evolves. ++ + Otherwise, stable interfaces MUST NOT be modified. 
+ ++#### How to Change Specification Interfaces ++ ++When an API change must be made, we will update the SDK with the new method one ++release before the API change. This will allow the SDK one version before the ++API change to work seamlessly with the new API. ++ ++If an incompatible version of the SDK is used with the new API the application ++will fail to compile. ++ ++#### How Not to Change Specification Interfaces ++ ++We have explored using a v2 of the API to change interfaces and found that there ++was no way to introduce a v2 and have it work seamlessly with the v1 of the API. ++Problems happened with libraries that upgraded to v2 when an application did not, ++and would not produce any telemetry. ++ ++More detail of the approaches considered and their limitations can be found in ++the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) ++issue. ++ ++#### How to Change Other Interfaces ++ + If new functionality is needed for an interface that cannot be changed it MUST + be added by including an additional interface. That added interface can be a + simple interface for the specific functionality that you want to add or it can +@@ -498,29 +556,65 @@ functionality should be added, each one will need their own super-set + interfaces and will duplicate the pattern. For this reason, the simple targeted + interface that defines the specific functionality should be preferred. + ++### Testing ++ ++The tests should never leak goroutines. ++ ++Use the term `ConcurrentSafe` in the test name when it aims to verify the ++absence of race conditions. ++ ++### Internal packages ++ ++The use of internal packages should be scoped to a single module. A sub-module ++should never import from a parent internal package. This creates a coupling ++between the two modules where a user can upgrade the parent without the child ++and if the internal package API has changed it will fail to upgrade[^3]. ++ ++There are two known exceptions to this rule: ++ ++- `go.opentelemetry.io/otel/internal/global` ++ - This package manages global state for all of opentelemetry-go. It needs to ++ be a single package in order to ensure the uniqueness of the global state. ++- `go.opentelemetry.io/otel/internal/baggage` ++ - This package provides values in a `context.Context` that need to be ++ recognized by `go.opentelemetry.io/otel/baggage` and ++ `go.opentelemetry.io/otel/bridge/opentracing` but remain private. ++ ++If you have duplicate code in multiple modules, make that code into a Go ++template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] ++to render the templates in the desired locations. See [#4404] for an example of ++this. 
++ ++[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 ++ + ## Approvers and Maintainers + +-Approvers: ++### Approvers + + - [Evan Torrie](https://github.com/evantorrie), Verizon Media +-- [Josh MacDonald](https://github.com/jmacd), LightStep + - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics + - [David Ashpole](https://github.com/dashpole), Google +-- [Robert PajÄ…k](https://github.com/pellared), Splunk + - [Chester Cheung](https://github.com/hanyuancheung), Tencent +-- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta ++- [Damien Mathieu](https://github.com/dmathieu), Elastic ++- [Anthony Mirabella](https://github.com/Aneurysm9), AWS + +-Maintainers: ++### Maintainers + + - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ++- [Robert PajÄ…k](https://github.com/pellared), Splunk + - [Tyler Yahn](https://github.com/MrAlias), Splunk + +-Emeritus: ++### Emeritus + + - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep ++- [Josh MacDonald](https://github.com/jmacd), LightStep + + ### Become an Approver or a Maintainer + + See the [community membership document in OpenTelemetry community + repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). ++ ++[Approver]: #approvers ++[Maintainer]: #maintainers ++[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl ++[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 +diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile +index 18ffaa33a99..35fc189961b 100644 +--- a/vendor/go.opentelemetry.io/otel/Makefile ++++ b/vendor/go.opentelemetry.io/otel/Makefile +@@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools + ALL_DOCS := $(shell find . -name '*.md' -type f | sort) + ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) + OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) ++ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + + GO = go + TIMEOUT = 60 +@@ -25,8 +25,8 @@ TIMEOUT = 60 + .DEFAULT_GOAL := precommit + + .PHONY: precommit ci +-precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default +-ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage ++precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default ++ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + + # Tools + +@@ -71,21 +71,78 @@ $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + GOJQ = $(TOOLS)/gojq + $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + ++GOTMPL = $(TOOLS)/gotmpl ++$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl ++ ++GORELEASE = $(TOOLS)/gorelease ++$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease ++ ++GOVULNCHECK = $(TOOLS)/govulncheck ++$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck ++ + .PHONY: tools +-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) ++tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +-# Build ++# Virtualized python tools via docker ++ ++# The directory where the virtual environment is created. ++VENVDIR := venv ++ ++# The directory where the python tools are installed. ++PYTOOLS := $(VENVDIR)/bin ++ ++# The pip executable in the virtual environment. ++PIP := $(PYTOOLS)/pip ++ ++# The directory in the docker image where the current directory is mounted. ++WORKDIR := /workdir + +-.PHONY: generate build ++# The python image to use for the virtual environment. ++PYTHONIMAGE := python:3.11.3-slim-bullseye + +-generate: $(OTEL_GO_MOD_DIRS:%=generate/%) +-generate/%: DIR=$* +-generate/%: | $(STRINGER) $(PORTO) ++# Run the python image with the current directory mounted. ++DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) ++ ++# Create a virtual environment for Python tools. ++$(PYTOOLS): ++# The `--upgrade` flag is needed to ensure that the virtual environment is ++# created with the latest pip version. ++ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" ++ ++# Install python packages into the virtual environment. ++$(PYTOOLS)/%: | $(PYTOOLS) ++ @$(DOCKERPY) $(PIP) install -r requirements.txt ++ ++CODESPELL = $(PYTOOLS)/codespell ++$(CODESPELL): PACKAGE=codespell ++ ++# Generate ++ ++.PHONY: generate ++generate: go-generate vanity-import-fix ++ ++.PHONY: go-generate ++go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) ++go-generate/%: DIR=$* ++go-generate/%: | $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ +- && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . ++ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... ++ ++.PHONY: vanity-import-fix ++vanity-import-fix: | $(PORTO) ++ @$(PORTO) --include-internal -w . ++ ++# Generate go.work file for local development. 
++.PHONY: go-work ++go-work: | $(CROSSLINK) ++ $(CROSSLINK) work --root=$(shell pwd) ++ ++# Build ++ ++.PHONY: build + +-build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) ++build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) + build/%: DIR=$* + build/%: + @echo "$(GO) build $(DIR)/..." \ +@@ -135,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + ++# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. ++BENCHMARK_TARGETS := sdk/trace ++.PHONY: benchmark ++benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) ++BENCHMARK_FILTER = . ++# You can override the filter for a particular directory by adding a rule here. ++benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample ++benchmark/%: ++ @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ ++ && cd $* \ ++ $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) ++ + .PHONY: golangci-lint golangci-lint-fix + golangci-lint-fix: ARGS=--fix + golangci-lint-fix: golangci-lint +@@ -156,30 +225,38 @@ go-mod-tidy/%: DIR=$* + go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ +- && $(GO) mod tidy -compat=1.17 ++ && $(GO) mod tidy -compat=1.20 + + .PHONY: lint-modules + lint-modules: go-mod-tidy + + .PHONY: lint +-lint: misspell lint-modules golangci-lint ++lint: misspell lint-modules golangci-lint govulncheck + + .PHONY: vanity-import-check + vanity-import-check: | $(PORTO) +- @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" +- +-.PHONY: vanity-import-fix +-vanity-import-fix: | $(PORTO) +- @$(PORTO) --include-internal -w . ++ @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + + .PHONY: misspell + misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + ++.PHONY: govulncheck ++govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) ++govulncheck/%: DIR=$* ++govulncheck/%: | $(GOVULNCHECK) ++ @echo "govulncheck ./... in $(DIR)" \ ++ && cd $(DIR) \ ++ && $(GOVULNCHECK) ./... ++ ++.PHONY: codespell ++codespell: | $(CODESPELL) ++ @$(DOCKERPY) $(CODESPELL) ++ + .PHONY: license-check + license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ +- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ ++ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ +@@ -189,7 +266,7 @@ license-check: + DEPENDABOT_CONFIG = .github/dependabot.yml + .PHONY: dependabot-check + dependabot-check: | $(DBOTCONF) +- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" ++ @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + + .PHONY: dependabot-generate + dependabot-generate: | $(DBOTCONF) +@@ -208,11 +285,22 @@ check-clean-work-tree: + SEMCONVPKG ?= "semconv/" + .PHONY: semconv-generate + semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +- @[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) +- @[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) +- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" +- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" +- @$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" ++ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) ++ [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" ++ ++.PHONY: gorelease ++gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) ++gorelease/%: DIR=$* ++gorelease/%:| $(GORELEASE) ++ @echo "gorelease in $(DIR):" \ ++ && cd $(DIR) \ ++ && $(GORELEASE) \ ++ || echo "" + + .PHONY: prerelease + prerelease: | $(MULTIMOD) +@@ -224,3 +312,7 @@ COMMIT ?= "HEAD" + add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} ++ ++.PHONY: lint-markdown ++lint-markdown: ++ docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md +diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md +index 4aeecb8bfe7..2c5b0cc28ab 100644 +--- a/vendor/go.opentelemetry.io/otel/README.md ++++ b/vendor/go.opentelemetry.io/otel/README.md +@@ -11,22 +11,22 @@ It provides a set of APIs to directly measure performance and behavior of your s + + ## Project Status + +-| Signal | Status | Project | +-| ------- | ---------- | ------- | +-| Traces | Stable | N/A | +-| Metrics | Alpha | N/A | +-| Logs | Frozen [1] | N/A | ++| Signal | Status | ++|---------|------------| ++| Traces | Stable | ++| Metrics | Stable | ++| Logs | Design [1] | + +-- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. ++- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). + No Logs Pull Requests are currently being accepted. + +-Progress and status specific to this repository is tracked in our local ++Progress and status specific to this repository is tracked in our + [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) + and + [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + + Project versioning information and stability guarantees can be found in the +-[versioning documentation](./VERSIONING.md). ++[versioning documentation](VERSIONING.md). + + ### Compatibility + +@@ -49,22 +49,17 @@ stop ensuring compatibility with these versions in the following manner: + Currently, this project supports the following environments. + + | OS | Go Version | Architecture | +-| ------- | ---------- | ------------ | +-| Ubuntu | 1.19 | amd64 | +-| Ubuntu | 1.18 | amd64 | +-| Ubuntu | 1.17 | amd64 | +-| Ubuntu | 1.19 | 386 | +-| Ubuntu | 1.18 | 386 | +-| Ubuntu | 1.17 | 386 | +-| MacOS | 1.19 | amd64 | +-| MacOS | 1.18 | amd64 | +-| MacOS | 1.17 | amd64 | +-| Windows | 1.19 | amd64 | +-| Windows | 1.18 | amd64 | +-| Windows | 1.17 | amd64 | +-| Windows | 1.19 | 386 | +-| Windows | 1.18 | 386 | +-| Windows | 1.17 | 386 | ++|---------|------------|--------------| ++| Ubuntu | 1.21 | amd64 | ++| Ubuntu | 1.20 | amd64 | ++| Ubuntu | 1.21 | 386 | ++| Ubuntu | 1.20 | 386 | ++| MacOS | 1.21 | amd64 | ++| MacOS | 1.20 | amd64 | ++| Windows | 1.21 | amd64 | ++| Windows | 1.20 | amd64 | ++| Windows | 1.21 | 386 | ++| Windows | 1.20 | 386 | + + While this project should work for other systems, no compatibility guarantees + are made for those systems currently. +@@ -102,12 +97,11 @@ export pipeline to send that telemetry to an observability platform. + All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
+ + | Exporter | Metrics | Traces | +-| :-----------------------------------: | :-----: | :----: | +-| [Jaeger](./exporters/jaeger/) | | ✓ | +-| [OTLP](./exporters/otlp/) | ✓ | ✓ | +-| [Prometheus](./exporters/prometheus/) | ✓ | | +-| [stdout](./exporters/stdout/) | ✓ | ✓ | +-| [Zipkin](./exporters/zipkin/) | | ✓ | ++|---------------------------------------|:-------:|:------:| ++| [OTLP](./exporters/otlp/) | ✓ | ✓ | ++| [Prometheus](./exporters/prometheus/) | ✓ | | ++| [stdout](./exporters/stdout/) | ✓ | ✓ | ++| [Zipkin](./exporters/zipkin/) | | ✓ | + + ## Contributing + +diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md +index 71e57625479..82ce3ee46a1 100644 +--- a/vendor/go.opentelemetry.io/otel/RELEASING.md ++++ b/vendor/go.opentelemetry.io/otel/RELEASING.md +@@ -2,24 +2,31 @@ + + ## Semantic Convention Generation + +-New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. ++New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. + The `semconv-generate` make target is used for this. + +-1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +-2. Run the `make semconv-generate ...` target from this repository. ++1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. ++2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` ++3. Run the `make semconv-generate ...` target from this repository. + + For example, + + ```sh +-export TAG="v1.7.0" # Change to the release version you are generating. +-export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +-git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" +-make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. ++export TAG="v1.21.0" # Change to the release version you are generating. ++export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" ++docker pull otel/semconvgen:latest ++make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. + ``` + + This should create a new sub-package of [`semconv`](./semconv). + Ensure things look correct before submitting a pull request to include the addition. + ++## Breaking changes validation ++ ++You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. ++ ++You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). ++ + ## Pre-Release + + First, decide which module sets will be released and update their versions +@@ -116,7 +123,17 @@ Once verified be sure to [make a release for the `contrib` repository](https://g + + ### Website Documentation + +-Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). ++Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go]. + Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. 
+ +-[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification ++[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions ++[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/ ++[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go ++ ++### Demo Repository ++ ++Bump the dependencies in the following Go services: ++ ++- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) ++- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) ++- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go +new file mode 100644 +index 00000000000..638c213d59a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go +@@ -0,0 +1,60 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package attribute // import "go.opentelemetry.io/otel/attribute" ++ ++// Filter supports removing certain attributes from attribute sets. When ++// the filter returns true, the attribute will be kept in the filtered ++// attribute set. When the filter returns false, the attribute is excluded ++// from the filtered attribute set, and the attribute instead appears in ++// the removed list of excluded attributes. ++type Filter func(KeyValue) bool ++ ++// NewAllowKeysFilter returns a Filter that only allows attributes with one of ++// the provided keys. ++// ++// If keys is empty a deny-all filter is returned. ++func NewAllowKeysFilter(keys ...Key) Filter { ++ if len(keys) <= 0 { ++ return func(kv KeyValue) bool { return false } ++ } ++ ++ allowed := make(map[Key]struct{}) ++ for _, k := range keys { ++ allowed[k] = struct{}{} ++ } ++ return func(kv KeyValue) bool { ++ _, ok := allowed[kv.Key] ++ return ok ++ } ++} ++ ++// NewDenyKeysFilter returns a Filter that only allows attributes ++// that do not have one of the provided keys. ++// ++// If keys is empty an allow-all filter is returned. 
++func NewDenyKeysFilter(keys ...Key) Filter { ++ if len(keys) <= 0 { ++ return func(kv KeyValue) bool { return true } ++ } ++ ++ forbid := make(map[Key]struct{}) ++ for _, k := range keys { ++ forbid[k] = struct{}{} ++ } ++ return func(kv KeyValue) bool { ++ _, ok := forbid[kv.Key] ++ return !ok ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go +index 26be5983223..9f9303d4f15 100644 +--- a/vendor/go.opentelemetry.io/otel/attribute/set.go ++++ b/vendor/go.opentelemetry.io/otel/attribute/set.go +@@ -18,6 +18,7 @@ import ( + "encoding/json" + "reflect" + "sort" ++ "sync" + ) + + type ( +@@ -38,13 +39,6 @@ type ( + iface interface{} + } + +- // Filter supports removing certain attributes from attribute sets. When +- // the filter returns true, the attribute will be kept in the filtered +- // attribute set. When the filter returns false, the attribute is excluded +- // from the filtered attribute set, and the attribute instead appears in +- // the removed list of excluded attributes. +- Filter func(KeyValue) bool +- + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may +@@ -62,6 +56,12 @@ var ( + iface: [0]KeyValue{}, + }, + } ++ ++ // sortables is a pool of Sortables used to create Sets with a user does ++ // not provide one. ++ sortables = sync.Pool{ ++ New: func() interface{} { return new(Sortable) }, ++ } + ) + + // EmptySet returns a reference to a Set with no elements. +@@ -91,7 +91,7 @@ func (l *Set) Len() int { + + // Get returns the KeyValue at ordered position idx in this set. + func (l *Set) Get(idx int) (KeyValue, bool) { +- if l == nil { ++ if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() +@@ -107,7 +107,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) { + + // Value returns the value of a specified key in this set. + func (l *Set) Value(k Key) (Value, bool) { +- if l == nil { ++ if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() +@@ -191,7 +191,9 @@ func NewSet(kvs ...KeyValue) Set { + if len(kvs) == 0 { + return empty() + } +- s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) ++ srt := sortables.Get().(*Sortable) ++ s, _ := NewSetWithSortableFiltered(kvs, srt, nil) ++ sortables.Put(srt) + return s + } + +@@ -218,7 +220,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + if len(kvs) == 0 { + return empty(), nil + } +- return NewSetWithSortableFiltered(kvs, new(Sortable), filter) ++ srt := sortables.Get().(*Sortable) ++ s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) ++ sortables.Put(srt) ++ return s, filtered + } + + // NewSetWithSortableFiltered returns a new Set. 
+diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go +index 57899f682e7..cb21dd5c096 100644 +--- a/vendor/go.opentelemetry.io/otel/attribute/value.go ++++ b/vendor/go.opentelemetry.io/otel/attribute/value.go +@@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute" + import ( + "encoding/json" + "fmt" ++ "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" ++ "go.opentelemetry.io/otel/internal/attribute" + ) + + //go:generate stringer -type=Type +@@ -66,12 +68,7 @@ func BoolValue(v bool) Value { + + // BoolSliceValue creates a BOOLSLICE Value. + func BoolSliceValue(v []bool) Value { +- cp := make([]bool, len(v)) +- copy(cp, v) +- return Value{ +- vtype: BOOLSLICE, +- slice: &cp, +- } ++ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + } + + // IntValue creates an INT64 Value. +@@ -81,13 +78,14 @@ func IntValue(v int) Value { + + // IntSliceValue creates an INTSLICE Value. + func IntSliceValue(v []int) Value { +- cp := make([]int64, 0, len(v)) +- for _, i := range v { +- cp = append(cp, int64(i)) ++ var int64Val int64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) ++ for i, val := range v { ++ cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, +- slice: &cp, ++ slice: cp.Elem().Interface(), + } + } + +@@ -101,12 +99,7 @@ func Int64Value(v int64) Value { + + // Int64SliceValue creates an INT64SLICE Value. + func Int64SliceValue(v []int64) Value { +- cp := make([]int64, len(v)) +- copy(cp, v) +- return Value{ +- vtype: INT64SLICE, +- slice: &cp, +- } ++ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + } + + // Float64Value creates a FLOAT64 Value. +@@ -119,12 +112,7 @@ func Float64Value(v float64) Value { + + // Float64SliceValue creates a FLOAT64SLICE Value. + func Float64SliceValue(v []float64) Value { +- cp := make([]float64, len(v)) +- copy(cp, v) +- return Value{ +- vtype: FLOAT64SLICE, +- slice: &cp, +- } ++ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + } + + // StringValue creates a STRING Value. +@@ -137,12 +125,7 @@ func StringValue(v string) Value { + + // StringSliceValue creates a STRINGSLICE Value. + func StringSliceValue(v []string) Value { +- cp := make([]string, len(v)) +- copy(cp, v) +- return Value{ +- vtype: STRINGSLICE, +- slice: &cp, +- } ++ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + } + + // Type returns a type of the Value. +@@ -159,10 +142,14 @@ func (v Value) AsBool() bool { + // AsBoolSlice returns the []bool value. Make sure that the Value's type is + // BOOLSLICE. + func (v Value) AsBoolSlice() []bool { +- if s, ok := v.slice.(*[]bool); ok { +- return *s ++ if v.vtype != BOOLSLICE { ++ return nil + } +- return nil ++ return v.asBoolSlice() ++} ++ ++func (v Value) asBoolSlice() []bool { ++ return attribute.AsBoolSlice(v.slice) + } + + // AsInt64 returns the int64 value. Make sure that the Value's type is +@@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 { + // AsInt64Slice returns the []int64 value. Make sure that the Value's type is + // INT64SLICE. + func (v Value) AsInt64Slice() []int64 { +- if s, ok := v.slice.(*[]int64); ok { +- return *s ++ if v.vtype != INT64SLICE { ++ return nil + } +- return nil ++ return v.asInt64Slice() ++} ++ ++func (v Value) asInt64Slice() []int64 { ++ return attribute.AsInt64Slice(v.slice) + } + + // AsFloat64 returns the float64 value. 
Make sure that the Value's +@@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 { + // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is + // FLOAT64SLICE. + func (v Value) AsFloat64Slice() []float64 { +- if s, ok := v.slice.(*[]float64); ok { +- return *s ++ if v.vtype != FLOAT64SLICE { ++ return nil + } +- return nil ++ return v.asFloat64Slice() ++} ++ ++func (v Value) asFloat64Slice() []float64 { ++ return attribute.AsFloat64Slice(v.slice) + } + + // AsString returns the string value. Make sure that the Value's type +@@ -204,10 +199,14 @@ func (v Value) AsString() string { + // AsStringSlice returns the []string value. Make sure that the Value's type is + // STRINGSLICE. + func (v Value) AsStringSlice() []string { +- if s, ok := v.slice.(*[]string); ok { +- return *s ++ if v.vtype != STRINGSLICE { ++ return nil + } +- return nil ++ return v.asStringSlice() ++} ++ ++func (v Value) asStringSlice() []string { ++ return attribute.AsStringSlice(v.slice) + } + + type unknownValueType struct{} +@@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} { + case BOOL: + return v.AsBool() + case BOOLSLICE: +- return v.AsBoolSlice() ++ return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: +- return v.AsInt64Slice() ++ return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: +- return v.AsFloat64Slice() ++ return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: +- return v.AsStringSlice() ++ return v.asStringSlice() + } + return unknownValueType{} + } +@@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} { + func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: +- return fmt.Sprint(*(v.slice.(*[]bool))) ++ return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: +- return fmt.Sprint(*(v.slice.(*[]int64))) ++ return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: +- return fmt.Sprint(*(v.slice.(*[]float64))) ++ return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: +- return fmt.Sprint(*(v.slice.(*[]string))) ++ return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: +diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go +index eba180e04f8..84532cb1da3 100644 +--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go ++++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go +@@ -61,11 +61,6 @@ type Property struct { + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +- +- // hasData indicates whether the created property contains data or not. +- // Properties that do not contain data are invalid with no other check +- // required. +- hasData bool + } + + // NewKeyProperty returns a new Property for key. 
+@@ -76,7 +71,7 @@ func NewKeyProperty(key string) (Property, error) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + +- p := Property{key: key, hasData: true} ++ p := Property{key: key} + return p, nil + } + +@@ -95,7 +90,6 @@ func NewKeyValueProperty(key, value string) (Property, error) { + key: key, + value: value, + hasValue: true, +- hasData: true, + } + return p, nil + } +@@ -117,7 +111,7 @@ func parseProperty(property string) (Property, error) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + +- p := Property{hasData: true} ++ var p Property + if match[1] != "" { + p.key = match[1] + } else { +@@ -136,10 +130,6 @@ func (p Property) validate() error { + return fmt.Errorf("invalid property: %w", err) + } + +- if !p.hasData { +- return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) +- } +- + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } +@@ -250,8 +240,9 @@ type Member struct { + hasData bool + } + +-// NewMember returns a new Member from the passed arguments. An error is +-// returned if the created Member would be invalid according to the W3C ++// NewMember returns a new Member from the passed arguments. The key will be ++// used directly while the value will be url decoded after validation. An error ++// is returned if the created Member would be invalid according to the W3C + // Baggage specification. + func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{ +@@ -263,7 +254,11 @@ func NewMember(key, value string, props ...Property) (Member, error) { + if err := m.validate(); err != nil { + return newInvalidMember(), err + } +- ++ decodedValue, err := url.PathUnescape(value) ++ if err != nil { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) ++ } ++ m.value = decodedValue + return m, nil + } + +@@ -284,52 +279,45 @@ func parseMember(member string) (Member, error) { + props properties + ) + +- parts := strings.SplitN(member, propertyDelimiter, 2) +- switch len(parts) { +- case 2: ++ keyValue, properties, found := strings.Cut(member, propertyDelimiter) ++ if found { + // Parse the member properties. +- for _, pStr := range strings.Split(parts[1], propertyDelimiter) { ++ for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } +- fallthrough +- case 1: +- // Parse the member key/value pair. +- +- // Take into account a value can contain equal signs (=). +- kv := strings.SplitN(parts[0], keyValueDelimiter, 2) +- if len(kv) != 2 { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) +- } +- // "Leading and trailing whitespaces are allowed but MUST be trimmed +- // when converting the header into a data structure." +- key = strings.TrimSpace(kv[0]) +- var err error +- value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) +- if err != nil { +- return newInvalidMember(), fmt.Errorf("%w: %q", err, value) +- } +- if !keyRe.MatchString(key) { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) +- } +- if !valueRe.MatchString(value) { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) +- } +- default: +- // This should never happen unless a developer has changed the string +- // splitting somehow. Panic instead of failing silently and allowing +- // the bug to slip past the CI checks. 
+- panic("failed to parse baggage member") ++ } ++ // Parse the member key/value pair. ++ ++ // Take into account a value can contain equal signs (=). ++ k, v, found := strings.Cut(keyValue, keyValueDelimiter) ++ if !found { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) ++ } ++ // "Leading and trailing whitespaces are allowed but MUST be trimmed ++ // when converting the header into a data structure." ++ key = strings.TrimSpace(k) ++ var err error ++ value, err = url.PathUnescape(strings.TrimSpace(v)) ++ if err != nil { ++ return newInvalidMember(), fmt.Errorf("%w: %q", err, value) ++ } ++ if !keyRe.MatchString(key) { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) ++ } ++ if !valueRe.MatchString(value) { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil + } + +-// validate ensures m conforms to the W3C Baggage specification, returning an +-// error otherwise. ++// validate ensures m conforms to the W3C Baggage specification. ++// A key is just an ASCII string, but a value must be URL encoded UTF-8, ++// returning an error otherwise. + func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) +@@ -465,6 +453,7 @@ func (b Baggage) Member(key string) Member { + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), ++ hasData: true, + } + } + +@@ -484,6 +473,7 @@ func (b Baggage) Members() []Member { + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), ++ hasData: true, + }) + } + return members +diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go +index 064a9279fd1..587ebae4e30 100644 +--- a/vendor/go.opentelemetry.io/otel/codes/codes.go ++++ b/vendor/go.opentelemetry.io/otel/codes/codes.go +@@ -23,10 +23,20 @@ import ( + const ( + // Unset is the default status code. + Unset Code = 0 ++ + // Error indicates the operation contains an error. ++ // ++ // NOTE: The error code in OTLP is 2. ++ // The value of this enum is only relevant to the internals ++ // of the Go SDK. + Error Code = 1 ++ + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. ++ // ++ // NOTE: The Ok code in OTLP is 1. ++ // The value of this enum is only relevant to the internals ++ // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go +index df3e0f1b621..4e328fbb4b3 100644 +--- a/vendor/go.opentelemetry.io/otel/codes/doc.go ++++ b/vendor/go.opentelemetry.io/otel/codes/doc.go +@@ -16,6 +16,6 @@ + Package codes defines the canonical error codes used by OpenTelemetry. + + It conforms to [the OpenTelemetry +-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). ++specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). 
+ */ + package codes // import "go.opentelemetry.io/otel/codes" +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +index ca91fd4f489..50295223182 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +@@ -2,7 +2,7 @@ + + [![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) + +-[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation. ++[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md) implementation. + + ## Installation + +@@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace + + ## Examples + +-- [Exporter setup and examples](./otlptracehttp/example_test.go) +-- [Full example sending telemetry to a local collector](../../../example/otel-collector) ++- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) ++- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) + + ## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) + +@@ -36,7 +36,7 @@ The `otlptracehttp` package implements a client for the span exporter that sends + The following environment variables can be used (instead of options objects) to + override the default configuration. For more information about how each of + these environment variables is interpreted, see [the OpenTelemetry +-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). ++specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md). + + | Environment variable | Option | Default value | + | ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- | +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +index c5ee6c098cc..0dbe15555b3 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +@@ -17,6 +17,7 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + import ( + "context" + "errors" ++ "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" +@@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) + return nil + } + +- return e.client.UploadTraces(ctx, protoSpans) ++ err := e.client.UploadTraces(ctx, protoSpans) ++ if err != nil { ++ return fmt.Errorf("traces export: %w", err) ++ } ++ return nil + } + + // Start establishes a connection to the receiving endpoint. 
+diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +index 9d6e1898b14..86fb61a0dec 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +@@ -27,10 +27,10 @@ import ( + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/exporters/otlp/internal" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + ) +@@ -130,13 +130,16 @@ var errAlreadyStopped = errors.New("the client is already stopped") + // If the client has already stopped, an error will be returned describing + // this. + func (c *client) Stop(ctx context.Context) error { ++ // Make sure to return context error if the context is done when calling this method. ++ err := ctx.Err() ++ + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() +- var err error ++ + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force +@@ -202,11 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc + ResourceSpans: protoSpans, + }) + if resp != nil && resp.PartialSuccess != nil { +- otel.Handle(internal.PartialSuccessToError( +- internal.TracingPartialSuccess, +- resp.PartialSuccess.RejectedSpans, +- resp.PartialSuccess.ErrorMessage, +- )) ++ msg := resp.PartialSuccess.GetErrorMessage() ++ n := resp.PartialSuccess.GetRejectedSpans() ++ if n != 0 || msg != "" { ++ err := internal.TracePartialSuccessError(n, msg) ++ otel.Handle(err) ++ } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { +@@ -255,7 +259,6 @@ func (c *client) exportContext(parent context.Context) (context.Context, context + // retryable returns if err identifies a request that can be retried and a + // duration to wait for if an explicit throttle time is included in err. + func retryable(err error) (bool, time.Duration) { +- //func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + switch s.Code() { + case codes.Canceled, +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +similarity index 57% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +index 67003c4a2fa..becb1f0fbbe 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/envconfig/envconfig.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ++package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + + import ( + "crypto/tls" +@@ -23,6 +26,8 @@ import ( + "strconv" + "strings" + "time" ++ ++ "go.opentelemetry.io/otel/internal/global" + ) + + // ConfigFn is the generic function used to set a config. +@@ -59,13 +64,26 @@ func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + } + } + ++// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. ++func WithBool(n string, fn func(bool)) ConfigFn { ++ return func(e *EnvOptionsReader) { ++ if v, ok := e.GetEnvValue(n); ok { ++ b := strings.ToLower(v) == "true" ++ fn(b) ++ } ++ } ++} ++ + // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. + func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if d, err := strconv.Atoi(v); err == nil { +- fn(time.Duration(d) * time.Millisecond) ++ d, err := strconv.Atoi(v) ++ if err != nil { ++ global.Error(err, "parse duration", "input", v) ++ return + } ++ fn(time.Duration(d) * time.Millisecond) + } + } + } +@@ -83,26 +101,62 @@ func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) + func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if u, err := url.Parse(v); err == nil { +- fn(u) ++ u, err := url.Parse(v) ++ if err != nil { ++ global.Error(err, "parse url", "input", v) ++ return + } ++ fn(u) + } + } + } + +-// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config. +-func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) { ++// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. ++func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if b, err := e.ReadFile(v); err == nil { +- if c, err := createTLSConfig(b); err == nil { +- fn(c) +- } ++ b, err := e.ReadFile(v) ++ if err != nil { ++ global.Error(err, "read tls ca cert file", "file", v) ++ return ++ } ++ c, err := createCertPool(b) ++ if err != nil { ++ global.Error(err, "create tls cert pool") ++ return + } ++ fn(c) + } + } + } + ++// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
++func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { ++ return func(e *EnvOptionsReader) { ++ vc, okc := e.GetEnvValue(nc) ++ vk, okk := e.GetEnvValue(nk) ++ if !okc || !okk { ++ return ++ } ++ cert, err := e.ReadFile(vc) ++ if err != nil { ++ global.Error(err, "read tls client cert", "file", vc) ++ return ++ } ++ key, err := e.ReadFile(vk) ++ if err != nil { ++ global.Error(err, "read tls client key", "file", vk) ++ return ++ } ++ crt, err := tls.X509KeyPair(cert, key) ++ if err != nil { ++ global.Error(err, "create tls client key pair") ++ return ++ } ++ fn(crt) ++ } ++} ++ + func keyWithNamespace(ns, key string) string { + if ns == "" { + return key +@@ -115,17 +169,20 @@ func stringToHeader(value string) map[string]string { + headers := make(map[string]string) + + for _, header := range headersPairs { +- nameValue := strings.SplitN(header, "=", 2) +- if len(nameValue) < 2 { ++ n, v, found := strings.Cut(header, "=") ++ if !found { ++ global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } +- name, err := url.QueryUnescape(nameValue[0]) ++ name, err := url.QueryUnescape(n) + if err != nil { ++ global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) +- value, err := url.QueryUnescape(nameValue[1]) ++ value, err := url.QueryUnescape(v) + if err != nil { ++ global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) +@@ -136,13 +193,10 @@ func stringToHeader(value string) map[string]string { + return headers + } + +-func createTLSConfig(certBytes []byte) (*tls.Config, error) { ++func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } +- +- return &tls.Config{ +- RootCAs: cp, +- }, nil ++ return cp, nil + } +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +new file mode 100644 +index 00000000000..1fb29061894 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +@@ -0,0 +1,35 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +similarity index 74% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +index b29f618e3de..32f6dddb4f6 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,17 +15,18 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" ++ "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + +- "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + ) + + // DefaultEnvOptionsReader is the default environments reader. +@@ -53,6 +57,7 @@ func ApplyHTTPEnvConfigs(cfg Config) Config { + func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + ++ tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) +@@ -81,8 +86,13 @@ func getOptionsFromEnv() []GenericOption { + return cfg + }, withEndpointForGRPC(u))) + }), +- envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), +- envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), ++ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), ++ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), ++ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), ++ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), ++ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), ++ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), ++ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), +@@ -125,3 +135,19 @@ func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOpt + } + } + } ++ ++// revive:disable-next-line:flag-parameter ++func withInsecure(b bool) GenericOption { ++ if b { ++ return WithInsecure() ++ } ++ return WithSecure() ++} ++ ++func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { ++ return func(e *envconfig.EnvOptionsReader) { ++ if c.RootCAs != nil || len(c.Certificates) > 0 { ++ fn(c) ++ } ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +similarity index 89% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +index 56e83b85334..19b8434d4d2 100644 
+--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,11 +15,13 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" + "fmt" ++ "path" ++ "strings" + "time" + + "google.golang.org/grpc" +@@ -25,8 +30,8 @@ import ( + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + +- "go.opentelemetry.io/otel/exporters/otlp/internal" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + ) + + const ( +@@ -82,13 +87,28 @@ func NewHTTPConfig(opts ...HTTPOption) Config { + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } +- cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath) ++ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg + } + ++// cleanPath returns a path with all spaces trimmed and all redundancies ++// removed. If urlPath is empty or cleaning it results in an empty string, ++// defaultPath is returned instead. ++func cleanPath(urlPath string, defaultPath string) string { ++ tmp := path.Clean(strings.TrimSpace(urlPath)) ++ if tmp == "." { ++ return defaultPath ++ } ++ if !path.IsAbs(tmp) { ++ tmp = fmt.Sprintf("/%s", tmp) ++ } ++ return tmp ++} ++ + // NewGRPCConfig returns a new Config with all settings applied from opts and + // any unset setting using the default gRPC config values. + func NewGRPCConfig(opts ...GRPCOption) Config { ++ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), +@@ -97,6 +117,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, ++ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +similarity index 90% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +index c2d6c036152..d9dcdc96e7d 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +similarity index 87% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +index 7287cf6cfeb..19b6d4b21f9 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +similarity index 64% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +index 7994706ab51..076905e54bf 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/partialsuccess.go ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,23 +15,10 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" ++package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + + import "fmt" + +-// PartialSuccessDropKind indicates the kind of partial success error +-// received by an OTLP exporter, which corresponds with the signal +-// being exported. +-type PartialSuccessDropKind string +- +-const ( +- // TracingPartialSuccess indicates that some spans were rejected. +- TracingPartialSuccess PartialSuccessDropKind = "spans" +- +- // MetricsPartialSuccess indicates that some metric data points were rejected. 
+- MetricsPartialSuccess PartialSuccessDropKind = "metric data points" +-) +- + // PartialSuccess represents the underlying error for all handling + // OTLP partial success messages. Use `errors.Is(err, + // PartialSuccess{})` to test whether an error passed to the OTel +@@ -36,7 +26,7 @@ const ( + type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 +- RejectedKind PartialSuccessDropKind ++ RejectedKind string + } + + var _ error = PartialSuccess{} +@@ -56,13 +46,22 @@ func (ps PartialSuccess) Is(err error) bool { + return ok + } + +-// PartialSuccessToError produces an error suitable for passing to +-// `otel.Handle()` out of the fields in a partial success response, +-// independent of which signal produced the outcome. +-func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error { ++// TracePartialSuccessError returns an error describing a partial success ++// response for the trace signal. ++func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { ++ return PartialSuccess{ ++ ErrorMessage: errorMessage, ++ RejectedItems: itemsRejected, ++ RejectedKind: "spans", ++ } ++} ++ ++// MetricPartialSuccessError returns an error describing a partial success ++// response for the metric signal. ++func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, +- RejectedKind: kind, ++ RejectedKind: "metric data points", + } + } +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +similarity index 80% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +index 3d43f7aea97..3ce7d6632b8 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/retry/retry.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -15,7 +18,7 @@ + // Package retry provides request retry functionality that can perform + // configurable exponential backoff for transient errors and honor any + // explicit throttle responses received. +-package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ++package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + + import ( + "context" +@@ -76,21 +79,21 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + } + } + +- // Do not use NewExponentialBackOff since it calls Reset and the code here +- // must call Reset after changing the InitialInterval (this saves an +- // unnecessary call to Now). 
+- b := &backoff.ExponentialBackOff{ +- InitialInterval: c.InitialInterval, +- RandomizationFactor: backoff.DefaultRandomizationFactor, +- Multiplier: backoff.DefaultMultiplier, +- MaxInterval: c.MaxInterval, +- MaxElapsedTime: c.MaxElapsedTime, +- Stop: backoff.Stop, +- Clock: backoff.SystemClock, +- } +- b.Reset() +- + return func(ctx context.Context, fn func(context.Context) error) error { ++ // Do not use NewExponentialBackOff since it calls Reset and the code here ++ // must call Reset after changing the InitialInterval (this saves an ++ // unnecessary call to Now). ++ b := &backoff.ExponentialBackOff{ ++ InitialInterval: c.InitialInterval, ++ RandomizationFactor: backoff.DefaultRandomizationFactor, ++ Multiplier: backoff.DefaultMultiplier, ++ MaxInterval: c.MaxInterval, ++ MaxElapsedTime: c.MaxElapsedTime, ++ Stop: backoff.Stop, ++ Clock: backoff.SystemClock, ++ } ++ b.Reset() ++ + for { + err := fn(ctx) + if err == nil { +@@ -119,8 +122,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + delay = throttle + } + +- if err := waitFunc(ctx, delay); err != nil { +- return err ++ if ctxErr := waitFunc(ctx, delay); ctxErr != nil { ++ return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +@@ -129,6 +132,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + // Allow override for testing. + var waitFunc = wait + ++// wait takes the caller's context, and the amount of time to wait. It will ++// return nil if the timer fires before or at the same time as the context's ++// deadline. This indicates that the call can be retried. + func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +index 3d09ce590d0..78ce9ad8f0b 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +@@ -22,8 +22,8 @@ import ( + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" +- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + ) + + // Option applies an option to the gRPC driver. +diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +similarity index 65% +rename from vendor/go.opentelemetry.io/otel/metric/unit/doc.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +index f8e723593e6..10ac73ee3b8 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +@@ -12,9 +12,9 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-// Package unit provides units. +-// +-// This package is currently in a pre-GA phase. Backwards incompatible changes +-// may be introduced in subsequent minor version releases as we work to track +-// the evolving OpenTelemetry specification and user feedback. 
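Note how the hunk above moves construction of the ExponentialBackOff inside the returned function, so each invocation of RequestFunc starts from fresh backoff state instead of sharing one across calls. The retry package itself is internal to the exporter; the sketch below reproduces the same pattern directly against github.com/cenkalti/backoff/v4, with made-up intervals and a stand-in operation:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Build the backoff per call, mirroring the patched RequestFunc, so retry
	// state is never shared between concurrent requests.
	b := &backoff.ExponentialBackOff{
		InitialInterval:     50 * time.Millisecond,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         500 * time.Millisecond,
		MaxElapsedTime:      5 * time.Second,
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	b.Reset()

	attempt := 0
	op := func() error {
		attempt++
		if attempt < 3 {
			return errors.New("transient export failure") // retried with backoff
		}
		return nil
	}

	// Retry op with exponential backoff, giving up when ctx is done.
	if err := backoff.Retry(op, backoff.WithContext(b, ctx)); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Printf("succeeded on attempt %d\n", attempt)
}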
+-package unit // import "go.opentelemetry.io/otel/metric/unit" ++package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ++ ++// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. ++func Version() string { ++ return "1.19.0" ++} +diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go +index 36cf09f7290..4115fe3bbb5 100644 +--- a/vendor/go.opentelemetry.io/otel/handler.go ++++ b/vendor/go.opentelemetry.io/otel/handler.go +@@ -15,59 +15,16 @@ + package otel // import "go.opentelemetry.io/otel" + + import ( +- "log" +- "os" +- "sync" ++ "go.opentelemetry.io/otel/internal/global" + ) + + var ( +- // globalErrorHandler provides an ErrorHandler that can be used +- // throughout an OpenTelemetry instrumented project. When a user +- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to +- // `Handle` and will be delegated to the registered ErrorHandler. +- globalErrorHandler = defaultErrorHandler() +- +- // Compile-time check that delegator implements ErrorHandler. +- _ ErrorHandler = (*delegator)(nil) +- // Compile-time check that errLogger implements ErrorHandler. +- _ ErrorHandler = (*errLogger)(nil) ++ // Compile-time check global.ErrDelegator implements ErrorHandler. ++ _ ErrorHandler = (*global.ErrDelegator)(nil) ++ // Compile-time check global.ErrLogger implements ErrorHandler. ++ _ ErrorHandler = (*global.ErrLogger)(nil) + ) + +-type delegator struct { +- lock *sync.RWMutex +- eh ErrorHandler +-} +- +-func (d *delegator) Handle(err error) { +- d.lock.RLock() +- defer d.lock.RUnlock() +- d.eh.Handle(err) +-} +- +-// setDelegate sets the ErrorHandler delegate. +-func (d *delegator) setDelegate(eh ErrorHandler) { +- d.lock.Lock() +- defer d.lock.Unlock() +- d.eh = eh +-} +- +-func defaultErrorHandler() *delegator { +- return &delegator{ +- lock: &sync.RWMutex{}, +- eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, +- } +-} +- +-// errLogger logs errors if no delegate is set, otherwise they are delegated. +-type errLogger struct { +- l *log.Logger +-} +- +-// Handle logs err if no delegate is set, otherwise it is delegated. +-func (h *errLogger) Handle(err error) { +- h.l.Print(err) +-} +- + // GetErrorHandler returns the global ErrorHandler instance. + // + // The default ErrorHandler instance returned will log all errors to STDERR +@@ -77,9 +34,7 @@ func (h *errLogger) Handle(err error) { + // + // Subsequent calls to SetErrorHandler after the first will not forward errors + // to the new ErrorHandler for prior returned instances. +-func GetErrorHandler() ErrorHandler { +- return globalErrorHandler +-} ++func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + + // SetErrorHandler sets the global ErrorHandler to h. + // +@@ -87,11 +42,7 @@ func GetErrorHandler() ErrorHandler { + // GetErrorHandler will send errors to h instead of the default logging + // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not + // delegate errors to h. +-func SetErrorHandler(h ErrorHandler) { +- globalErrorHandler.setDelegate(h) +-} ++func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + + // Handle is a convenience function for ErrorHandler().Handle(err). 
+-func Handle(err error) { +- GetErrorHandler().Handle(err) +-} ++func Handle(err error) { global.Handle(err) } +diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +new file mode 100644 +index 00000000000..622c3ee3f27 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +@@ -0,0 +1,111 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++/* ++Package attribute provide several helper functions for some commonly used ++logic of processing attributes. ++*/ ++package attribute // import "go.opentelemetry.io/otel/internal/attribute" ++ ++import ( ++ "reflect" ++) ++ ++// BoolSliceValue converts a bool slice into an array with same elements as slice. ++func BoolSliceValue(v []bool) interface{} { ++ var zero bool ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) ++ return cp.Elem().Interface() ++} ++ ++// Int64SliceValue converts an int64 slice into an array with same elements as slice. ++func Int64SliceValue(v []int64) interface{} { ++ var zero int64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) ++ return cp.Elem().Interface() ++} ++ ++// Float64SliceValue converts a float64 slice into an array with same elements as slice. ++func Float64SliceValue(v []float64) interface{} { ++ var zero float64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) ++ return cp.Elem().Interface() ++} ++ ++// StringSliceValue converts a string slice into an array with same elements as slice. ++func StringSliceValue(v []string) interface{} { ++ var zero string ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) ++ return cp.Elem().Interface() ++} ++ ++// AsBoolSlice converts a bool array into a slice into with same elements as array. ++func AsBoolSlice(v interface{}) []bool { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero bool ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]bool) ++} ++ ++// AsInt64Slice converts an int64 array into a slice into with same elements as array. 
++func AsInt64Slice(v interface{}) []int64 { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero int64 ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]int64) ++} ++ ++// AsFloat64Slice converts a float64 array into a slice into with same elements as array. ++func AsFloat64Slice(v interface{}) []float64 { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero float64 ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]float64) ++} ++ ++// AsStringSlice converts a string array into a slice into with same elements as array. ++func AsStringSlice(v interface{}) []string { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero string ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]string) ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go +new file mode 100644 +index 00000000000..f532f07e9e5 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/gen.go +@@ -0,0 +1,29 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
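The internal/attribute helpers above convert slices to and from fixed-size arrays and sit behind the public slice-valued attribute constructors. Since the package is internal, a usage sketch has to go through go.opentelemetry.io/otel/attribute; the key names below are invented:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Slice-valued attributes are stored as fixed-size arrays internally
	// (via helpers like the ones above) so the value remains comparable.
	kv := attribute.StringSlice("http.request.header.accept",
		[]string{"text/html", "application/json"})

	// AsStringSlice converts the stored array back into a []string copy.
	fmt.Println(kv.Key, kv.Value.AsStringSlice())

	ints := attribute.Int64Slice("retry.delays.ms", []int64{50, 100, 200})
	fmt.Println(ints.Key, ints.Value.AsInt64Slice())
}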
++ ++package internal // import "go.opentelemetry.io/otel/internal" ++ ++//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go ++//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go ++//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go ++ ++//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go ++//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go ++//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go ++//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go ++//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go +new file mode 100644 +index 00000000000..5e9b8304792 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go +@@ -0,0 +1,102 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "log" ++ "os" ++ "sync/atomic" ++) ++ ++var ( ++ // GlobalErrorHandler provides an ErrorHandler that can be used ++ // throughout an OpenTelemetry instrumented project. When a user ++ // specified ErrorHandler is registered (`SetErrorHandler`) all calls to ++ // `Handle` and will be delegated to the registered ErrorHandler. ++ GlobalErrorHandler = defaultErrorHandler() ++ ++ // Compile-time check that delegator implements ErrorHandler. ++ _ ErrorHandler = (*ErrDelegator)(nil) ++ // Compile-time check that errLogger implements ErrorHandler. ++ _ ErrorHandler = (*ErrLogger)(nil) ++) ++ ++// ErrorHandler handles irremediable events. ++type ErrorHandler interface { ++ // Handle handles any error deemed irremediable by an OpenTelemetry ++ // component. 
++ Handle(error) ++} ++ ++type ErrDelegator struct { ++ delegate atomic.Pointer[ErrorHandler] ++} ++ ++func (d *ErrDelegator) Handle(err error) { ++ d.getDelegate().Handle(err) ++} ++ ++func (d *ErrDelegator) getDelegate() ErrorHandler { ++ return *d.delegate.Load() ++} ++ ++// setDelegate sets the ErrorHandler delegate. ++func (d *ErrDelegator) setDelegate(eh ErrorHandler) { ++ d.delegate.Store(&eh) ++} ++ ++func defaultErrorHandler() *ErrDelegator { ++ d := &ErrDelegator{} ++ d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) ++ return d ++} ++ ++// ErrLogger logs errors if no delegate is set, otherwise they are delegated. ++type ErrLogger struct { ++ l *log.Logger ++} ++ ++// Handle logs err if no delegate is set, otherwise it is delegated. ++func (h *ErrLogger) Handle(err error) { ++ h.l.Print(err) ++} ++ ++// GetErrorHandler returns the global ErrorHandler instance. ++// ++// The default ErrorHandler instance returned will log all errors to STDERR ++// until an override ErrorHandler is set with SetErrorHandler. All ++// ErrorHandler returned prior to this will automatically forward errors to ++// the set instance instead of logging. ++// ++// Subsequent calls to SetErrorHandler after the first will not forward errors ++// to the new ErrorHandler for prior returned instances. ++func GetErrorHandler() ErrorHandler { ++ return GlobalErrorHandler ++} ++ ++// SetErrorHandler sets the global ErrorHandler to h. ++// ++// The first time this is called all ErrorHandler previously returned from ++// GetErrorHandler will send errors to h instead of the default logging ++// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not ++// delegate errors to h. ++func SetErrorHandler(h ErrorHandler) { ++ GlobalErrorHandler.setDelegate(h) ++} ++ ++// Handle is a convenience function for ErrorHandler().Handle(err). ++func Handle(err error) { ++ GetErrorHandler().Handle(err) ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +new file mode 100644 +index 00000000000..ebb13c20678 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +@@ -0,0 +1,371 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "context" ++ "sync/atomic" ++ ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// unwrapper unwraps to return the underlying instrument implementation. 
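The delegating ErrDelegator/ErrLogger pair above is what the public otel.SetErrorHandler and otel.Handle calls shown earlier resolve to. A minimal sketch of installing a custom handler (the handler type and log prefix are hypothetical):

package main

import (
	"errors"
	"log"
	"os"

	"go.opentelemetry.io/otel"
)

// stderrHandler is a hypothetical ErrorHandler that tags OTel errors.
type stderrHandler struct {
	l *log.Logger
}

func (h stderrHandler) Handle(err error) {
	h.l.Printf("opentelemetry: %v", err)
}

func main() {
	// Until SetErrorHandler is called, errors go to the default stderr logger;
	// afterwards, previously returned handlers delegate to this one as well.
	otel.SetErrorHandler(stderrHandler{l: log.New(os.Stderr, "otel ", log.LstdFlags)})

	// otel.Handle is shorthand for GetErrorHandler().Handle(err).
	otel.Handle(errors.New("exporter: connection refused"))
}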
++type unwrapper interface { ++ Unwrap() metric.Observable ++} ++ ++type afCounter struct { ++ embedded.Float64ObservableCounter ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableCounterOption ++ ++ delegate atomic.Value // metric.Float64ObservableCounter ++} ++ ++var ( ++ _ unwrapper = (*afCounter)(nil) ++ _ metric.Float64ObservableCounter = (*afCounter)(nil) ++) ++ ++func (i *afCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableCounter) ++ } ++ return nil ++} ++ ++type afUpDownCounter struct { ++ embedded.Float64ObservableUpDownCounter ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableUpDownCounterOption ++ ++ delegate atomic.Value // metric.Float64ObservableUpDownCounter ++} ++ ++var ( ++ _ unwrapper = (*afUpDownCounter)(nil) ++ _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) ++) ++ ++func (i *afUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afUpDownCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableUpDownCounter) ++ } ++ return nil ++} ++ ++type afGauge struct { ++ embedded.Float64ObservableGauge ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableGaugeOption ++ ++ delegate atomic.Value // metric.Float64ObservableGauge ++} ++ ++var ( ++ _ unwrapper = (*afGauge)(nil) ++ _ metric.Float64ObservableGauge = (*afGauge)(nil) ++) ++ ++func (i *afGauge) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableGauge(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afGauge) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableGauge) ++ } ++ return nil ++} ++ ++type aiCounter struct { ++ embedded.Int64ObservableCounter ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableCounterOption ++ ++ delegate atomic.Value // metric.Int64ObservableCounter ++} ++ ++var ( ++ _ unwrapper = (*aiCounter)(nil) ++ _ metric.Int64ObservableCounter = (*aiCounter)(nil) ++) ++ ++func (i *aiCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableCounter) ++ } ++ return nil ++} ++ ++type aiUpDownCounter struct { ++ embedded.Int64ObservableUpDownCounter ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableUpDownCounterOption ++ ++ delegate atomic.Value // metric.Int64ObservableUpDownCounter ++} ++ ++var ( ++ _ unwrapper = (*aiUpDownCounter)(nil) ++ _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) ++) ++ ++func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) 
++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiUpDownCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableUpDownCounter) ++ } ++ return nil ++} ++ ++type aiGauge struct { ++ embedded.Int64ObservableGauge ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableGaugeOption ++ ++ delegate atomic.Value // metric.Int64ObservableGauge ++} ++ ++var ( ++ _ unwrapper = (*aiGauge)(nil) ++ _ metric.Int64ObservableGauge = (*aiGauge)(nil) ++) ++ ++func (i *aiGauge) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableGauge(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiGauge) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableGauge) ++ } ++ return nil ++} ++ ++// Sync Instruments. ++type sfCounter struct { ++ embedded.Float64Counter ++ ++ name string ++ opts []metric.Float64CounterOption ++ ++ delegate atomic.Value // metric.Float64Counter ++} ++ ++var _ metric.Float64Counter = (*sfCounter)(nil) ++ ++func (i *sfCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64Counter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64Counter).Add(ctx, incr, opts...) ++ } ++} ++ ++type sfUpDownCounter struct { ++ embedded.Float64UpDownCounter ++ ++ name string ++ opts []metric.Float64UpDownCounterOption ++ ++ delegate atomic.Value // metric.Float64UpDownCounter ++} ++ ++var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) ++ ++func (i *sfUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64UpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) ++ } ++} ++ ++type sfHistogram struct { ++ embedded.Float64Histogram ++ ++ name string ++ opts []metric.Float64HistogramOption ++ ++ delegate atomic.Value // metric.Float64Histogram ++} ++ ++var _ metric.Float64Histogram = (*sfHistogram)(nil) ++ ++func (i *sfHistogram) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64Histogram(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64Histogram).Record(ctx, x, opts...) ++ } ++} ++ ++type siCounter struct { ++ embedded.Int64Counter ++ ++ name string ++ opts []metric.Int64CounterOption ++ ++ delegate atomic.Value // metric.Int64Counter ++} ++ ++var _ metric.Int64Counter = (*siCounter)(nil) ++ ++func (i *siCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64Counter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64Counter).Add(ctx, x, opts...) 
++ } ++} ++ ++type siUpDownCounter struct { ++ embedded.Int64UpDownCounter ++ ++ name string ++ opts []metric.Int64UpDownCounterOption ++ ++ delegate atomic.Value // metric.Int64UpDownCounter ++} ++ ++var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) ++ ++func (i *siUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64UpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) ++ } ++} ++ ++type siHistogram struct { ++ embedded.Int64Histogram ++ ++ name string ++ opts []metric.Int64HistogramOption ++ ++ delegate atomic.Value // metric.Int64Histogram ++} ++ ++var _ metric.Int64Histogram = (*siHistogram)(nil) ++ ++func (i *siHistogram) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64Histogram(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64Histogram).Record(ctx, x, opts...) ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +index ccb3258711a..c6f305a2b76 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +@@ -17,47 +17,53 @@ package global // import "go.opentelemetry.io/otel/internal/global" + import ( + "log" + "os" +- "sync" ++ "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + ) + +-// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. ++// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. + // + // The default logger uses stdr which is backed by the standard `log.Logger` + // interface. This logger will only show messages at the Error Level. +-var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) +-var globalLoggerLock = &sync.RWMutex{} ++var globalLogger atomic.Pointer[logr.Logger] ++ ++func init() { ++ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) ++} + + // SetLogger overrides the globalLogger with l. + // +-// To see Info messages use a logger with `l.V(1).Enabled() == true` +-// To see Debug messages use a logger with `l.V(5).Enabled() == true`. ++// To see Warn messages use a logger with `l.V(1).Enabled() == true` ++// To see Info messages use a logger with `l.V(4).Enabled() == true` ++// To see Debug messages use a logger with `l.V(8).Enabled() == true`. + func SetLogger(l logr.Logger) { +- globalLoggerLock.Lock() +- defer globalLoggerLock.Unlock() +- globalLogger = l ++ globalLogger.Store(&l) ++} ++ ++func getLogger() logr.Logger { ++ return *globalLogger.Load() + } + + // Info prints messages about the general state of the API or SDK. +-// This should usually be less then 5 messages a minute. ++// This should usually be less than 5 messages a minute. + func Info(msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.V(1).Info(msg, keysAndValues...) ++ getLogger().V(4).Info(msg, keysAndValues...) 
+ } + + // Error prints messages about exceptional states of the API or SDK. + func Error(err error, msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.Error(err, msg, keysAndValues...) ++ getLogger().Error(err, msg, keysAndValues...) + } + + // Debug prints messages about all internal changes in the API or SDK. + func Debug(msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.V(5).Info(msg, keysAndValues...) ++ getLogger().V(8).Info(msg, keysAndValues...) ++} ++ ++// Warn prints messages about warnings in the API or SDK. ++// Not an error but is likely more important than an informational event. ++func Warn(msg string, keysAndValues ...interface{}) { ++ getLogger().V(1).Info(msg, keysAndValues...) + } +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go +new file mode 100644 +index 00000000000..0097db478c6 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go +@@ -0,0 +1,354 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "container/list" ++ "sync" ++ "sync/atomic" ++ ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// meterProvider is a placeholder for a configured SDK MeterProvider. ++// ++// All MeterProvider functionality is forwarded to a delegate once ++// configured. ++type meterProvider struct { ++ embedded.MeterProvider ++ ++ mtx sync.Mutex ++ meters map[il]*meter ++ ++ delegate metric.MeterProvider ++} ++ ++// setDelegate configures p to delegate all MeterProvider functionality to ++// provider. ++// ++// All Meters provided prior to this function call are switched out to be ++// Meters provided by provider. All instruments and callbacks are recreated and ++// delegated. ++// ++// It is guaranteed by the caller that this happens only once. ++func (p *meterProvider) setDelegate(provider metric.MeterProvider) { ++ p.mtx.Lock() ++ defer p.mtx.Unlock() ++ ++ p.delegate = provider ++ ++ if len(p.meters) == 0 { ++ return ++ } ++ ++ for _, meter := range p.meters { ++ meter.setDelegate(provider) ++ } ++ ++ p.meters = nil ++} ++ ++// Meter implements MeterProvider. ++func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { ++ p.mtx.Lock() ++ defer p.mtx.Unlock() ++ ++ if p.delegate != nil { ++ return p.delegate.Meter(name, opts...) ++ } ++ ++ // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. ++ ++ c := metric.NewMeterConfig(opts...) 
++ key := il{ ++ name: name, ++ version: c.InstrumentationVersion(), ++ } ++ ++ if p.meters == nil { ++ p.meters = make(map[il]*meter) ++ } ++ ++ if val, ok := p.meters[key]; ok { ++ return val ++ } ++ ++ t := &meter{name: name, opts: opts} ++ p.meters[key] = t ++ return t ++} ++ ++// meter is a placeholder for a metric.Meter. ++// ++// All Meter functionality is forwarded to a delegate once configured. ++// Otherwise, all functionality is forwarded to a NoopMeter. ++type meter struct { ++ embedded.Meter ++ ++ name string ++ opts []metric.MeterOption ++ ++ mtx sync.Mutex ++ instruments []delegatedInstrument ++ ++ registry list.List ++ ++ delegate atomic.Value // metric.Meter ++} ++ ++type delegatedInstrument interface { ++ setDelegate(metric.Meter) ++} ++ ++// setDelegate configures m to delegate all Meter functionality to Meters ++// created by provider. ++// ++// All subsequent calls to the Meter methods will be passed to the delegate. ++// ++// It is guaranteed by the caller that this happens only once. ++func (m *meter) setDelegate(provider metric.MeterProvider) { ++ meter := provider.Meter(m.name, m.opts...) ++ m.delegate.Store(meter) ++ ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ ++ for _, inst := range m.instruments { ++ inst.setDelegate(meter) ++ } ++ ++ for e := m.registry.Front(); e != nil; e = e.Next() { ++ r := e.Value.(*registration) ++ r.setDelegate(meter) ++ m.registry.Remove(e) ++ } ++ ++ m.instruments = nil ++ m.registry.Init() ++} ++ ++func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64Counter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64UpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64Histogram(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siHistogram{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableUpDownCounter(name, options...) 
++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableGauge(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiGauge{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64Counter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64UpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64Histogram(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfHistogram{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableUpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableGauge(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afGauge{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++// RegisterCallback captures the function that will be called during Collect. ++func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ insts = unwrapInstruments(insts) ++ return del.RegisterCallback(f, insts...) 
++ } ++ ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ ++ reg := ®istration{instruments: insts, function: f} ++ e := m.registry.PushBack(reg) ++ reg.unreg = func() error { ++ m.mtx.Lock() ++ _ = m.registry.Remove(e) ++ m.mtx.Unlock() ++ return nil ++ } ++ return reg, nil ++} ++ ++type wrapped interface { ++ unwrap() metric.Observable ++} ++ ++func unwrapInstruments(instruments []metric.Observable) []metric.Observable { ++ out := make([]metric.Observable, 0, len(instruments)) ++ ++ for _, inst := range instruments { ++ if in, ok := inst.(wrapped); ok { ++ out = append(out, in.unwrap()) ++ } else { ++ out = append(out, inst) ++ } ++ } ++ ++ return out ++} ++ ++type registration struct { ++ embedded.Registration ++ ++ instruments []metric.Observable ++ function metric.Callback ++ ++ unreg func() error ++ unregMu sync.Mutex ++} ++ ++func (c *registration) setDelegate(m metric.Meter) { ++ insts := unwrapInstruments(c.instruments) ++ ++ c.unregMu.Lock() ++ defer c.unregMu.Unlock() ++ ++ if c.unreg == nil { ++ // Unregister already called. ++ return ++ } ++ ++ reg, err := m.RegisterCallback(c.function, insts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ } ++ ++ c.unreg = reg.Unregister ++} ++ ++func (c *registration) Unregister() error { ++ c.unregMu.Lock() ++ defer c.unregMu.Unlock() ++ if c.unreg == nil { ++ // Unregister already called. ++ return nil ++ } ++ ++ var err error ++ err, c.unreg = c.unreg(), nil ++ return err ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go +index 1ad38f828ec..7985005bcb6 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go +@@ -19,6 +19,7 @@ import ( + "sync" + "sync/atomic" + ++ "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + ) +@@ -31,14 +32,20 @@ type ( + propagatorsHolder struct { + tm propagation.TextMapPropagator + } ++ ++ meterProviderHolder struct { ++ mp metric.MeterProvider ++ } + ) + + var ( +- globalTracer = defaultTracerValue() +- globalPropagators = defaultPropagatorsValue() ++ globalTracer = defaultTracerValue() ++ globalPropagators = defaultPropagatorsValue() ++ globalMeterProvider = defaultMeterProvider() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once ++ delegateMeterOnce sync.Once + ) + + // TracerProvider is the internal implementation for global.TracerProvider. +@@ -102,6 +109,34 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) { + globalPropagators.Store(propagatorsHolder{tm: p}) + } + ++// MeterProvider is the internal implementation for global.MeterProvider. ++func MeterProvider() metric.MeterProvider { ++ return globalMeterProvider.Load().(meterProviderHolder).mp ++} ++ ++// SetMeterProvider is the internal implementation for global.SetMeterProvider. ++func SetMeterProvider(mp metric.MeterProvider) { ++ current := MeterProvider() ++ if _, cOk := current.(*meterProvider); cOk { ++ if _, mpOk := mp.(*meterProvider); mpOk && current == mp { ++ // Do not assign the default delegating MeterProvider to delegate ++ // to itself. ++ Error( ++ errors.New("no delegate configured in meter provider"), ++ "Setting meter provider to it's current value. 
No delegate will be configured", ++ ) ++ return ++ } ++ } ++ ++ delegateMeterOnce.Do(func() { ++ if def, ok := current.(*meterProvider); ok { ++ def.setDelegate(mp) ++ } ++ }) ++ globalMeterProvider.Store(meterProviderHolder{mp: mp}) ++} ++ + func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) +@@ -113,3 +148,9 @@ func defaultPropagatorsValue() *atomic.Value { + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v + } ++ ++func defaultMeterProvider() *atomic.Value { ++ v := &atomic.Value{} ++ v.Store(meterProviderHolder{mp: &meterProvider{}}) ++ return v ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go +index 5f008d0982b..3f61ec12a34 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go +@@ -39,6 +39,7 @@ import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // tracerProvider is a placeholder for a configured SDK TracerProvider. +@@ -46,6 +47,8 @@ import ( + // All TracerProvider functionality is forwarded to a delegate once + // configured. + type tracerProvider struct { ++ embedded.TracerProvider ++ + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +@@ -119,6 +122,8 @@ type il struct { + // All Tracer functionality is forwarded to a delegate once configured. + // Otherwise, all functionality is forwarded to a NoopTracer. + type tracer struct { ++ embedded.Tracer ++ + name string + opts []trace.TracerOption + provider *tracerProvider +@@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart + // SpanContext. It performs no operations other than to return the wrapped + // SpanContext. + type nonRecordingSpan struct { ++ embedded.Span ++ + sc trace.SpanContext + tracer *tracer + } +diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go +new file mode 100644 +index 00000000000..f955171951f +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric.go +@@ -0,0 +1,53 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otel // import "go.opentelemetry.io/otel" ++ ++import ( ++ "go.opentelemetry.io/otel/internal/global" ++ "go.opentelemetry.io/otel/metric" ++) ++ ++// Meter returns a Meter from the global MeterProvider. The name must be the ++// name of the library providing instrumentation. This name may be the same as ++// the instrumented code only if that code provides built-in instrumentation. ++// If the name is empty, then a implementation defined default name will be ++// used instead. ++// ++// If this is called before a global MeterProvider is registered the returned ++// Meter will be a No-op implementation of a Meter. 
When a global MeterProvider ++// is registered for the first time, the returned Meter, and all the ++// instruments it has created or will create, are recreated automatically from ++// the new MeterProvider. ++// ++// This is short for GetMeterProvider().Meter(name). ++func Meter(name string, opts ...metric.MeterOption) metric.Meter { ++ return GetMeterProvider().Meter(name, opts...) ++} ++ ++// GetMeterProvider returns the registered global meter provider. ++// ++// If no global GetMeterProvider has been registered, a No-op GetMeterProvider ++// implementation is returned. When a global GetMeterProvider is registered for ++// the first time, the returned GetMeterProvider, and all the Meters it has ++// created or will create, are recreated automatically from the new ++// GetMeterProvider. ++func GetMeterProvider() metric.MeterProvider { ++ return global.MeterProvider() ++} ++ ++// SetMeterProvider registers mp as the global MeterProvider. ++func SetMeterProvider(mp metric.MeterProvider) { ++ global.SetMeterProvider(mp) ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +new file mode 100644 +index 00000000000..072baa8e8d0 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +@@ -0,0 +1,271 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Float64Observable describes a set of instruments used asynchronously to ++// record float64 measurements once per collection cycle. Observations of ++// these instruments are only made within a callback. ++// ++// Warning: Methods may be added to this interface in minor releases. ++type Float64Observable interface { ++ Observable ++ ++ float64Observable() ++} ++ ++// Float64ObservableCounter is an instrument used to asynchronously record ++// increasing float64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. The value observed is ++// assumed the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for ++// unimplemented methods. ++type Float64ObservableCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableCounter ++ ++ Float64Observable ++} ++ ++// Float64ObservableCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. 
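With the global delegation above, otel.Meter hands instrumentation a working meter even before an SDK MeterProvider is registered, and recreates its instruments once one is. A short usage sketch (the instrumentation scope, instrument, and attribute names are invented):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()

	// Before an SDK MeterProvider is registered this is a no-op meter; once
	// otel.SetMeterProvider is called, the instrument is recreated against it.
	meter := otel.Meter("example.com/hypothetical/instrumentation")

	requests, err := meter.Int64Counter(
		"http.server.requests",
		metric.WithDescription("Number of HTTP requests handled."),
		metric.WithUnit("{request}"),
	)
	if err != nil {
		otel.Handle(err)
		return
	}

	// Record one request, attaching a route attribute to the measurement.
	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("http.route", "/healthz")))
}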
++type Float64ObservableCounterConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableCounterConfig returns a new ++// [Float64ObservableCounterConfig] with all opts applied. ++func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { ++ var config Float64ObservableCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableCounterOption applies options to a ++// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableCounterOption. ++type Float64ObservableCounterOption interface { ++ applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig ++} ++ ++// Float64ObservableUpDownCounter is an instrument used to asynchronously ++// record float64 measurements once per collection cycle. Observations are only ++// made within a callback for this instrument. The value observed is assumed ++// the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64ObservableUpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableUpDownCounter ++ ++ Float64Observable ++} ++ ++// Float64ObservableUpDownCounterConfig contains options for asynchronous ++// counter instruments that record int64 values. ++type Float64ObservableUpDownCounterConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableUpDownCounterConfig returns a new ++// [Float64ObservableUpDownCounterConfig] with all opts applied. ++func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { ++ var config Float64ObservableUpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableUpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableUpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableUpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableUpDownCounterOption applies options to a ++// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableUpDownCounterOption. 
++type Float64ObservableUpDownCounterOption interface { ++ applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig ++} ++ ++// Float64ObservableGauge is an instrument used to asynchronously record ++// instantaneous float64 measurements once per collection cycle. Observations ++// are only made within a callback for this instrument. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64ObservableGauge interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableGauge ++ ++ Float64Observable ++} ++ ++// Float64ObservableGaugeConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Float64ObservableGaugeConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] ++// with all opts applied. ++func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { ++ var config Float64ObservableGaugeConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableGauge(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableGaugeConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableGaugeConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableGaugeOption applies options to a ++// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableGaugeOption. ++type Float64ObservableGaugeOption interface { ++ applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig ++} ++ ++// Float64Observer is a recorder of float64 measurements. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Observer ++ ++ // Observe records the float64 value. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Observe(value float64, options ...ObserveOption) ++} ++ ++// Float64Callback is a function registered with a Meter that makes ++// observations for a Float64Observerable instrument it is registered with. ++// Calls to the Float64Observer record measurement values for the ++// Float64Observable. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. 
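Float64Callback is the hook registered for asynchronous instruments; combined with the WithFloat64Callback option added a few lines below in this same file, a caller might wire an observable gauge as in the sketch below (the gauge name and the readHeapUtilization helper are hypothetical):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// readHeapUtilization is a stand-in for whatever the application measures.
func readHeapUtilization() float64 { return 0.42 }

func main() {
	meter := otel.Meter("example.com/hypothetical/instrumentation")

	// The callback runs once per collection cycle and must report through the
	// provided Float64Observer rather than recording synchronously.
	_, err := meter.Float64ObservableGauge(
		"process.runtime.heap.utilization",
		metric.WithDescription("Fraction of heap in use."),
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(readHeapUtilization())
			return nil
		}),
	)
	if err != nil {
		otel.Handle(err)
	}
}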
++// ++// The function needs to make unique observations across all registered ++// Float64Callbacks. Meaning, it should not report measurements with the same ++// attributes as another Float64Callbacks also registered for the same ++// instrument. ++// ++// The function needs to be concurrent safe. ++type Float64Callback func(context.Context, Float64Observer) error ++ ++// Float64ObservableOption applies options to float64 Observer instruments. ++type Float64ObservableOption interface { ++ Float64ObservableCounterOption ++ Float64ObservableUpDownCounterOption ++ Float64ObservableGaugeOption ++} ++ ++type float64CallbackOpt struct { ++ cback Float64Callback ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++// WithFloat64Callback adds callback to be called for an instrument. ++func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { ++ return float64CallbackOpt{callback} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +new file mode 100644 +index 00000000000..9bd6ebf0205 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +@@ -0,0 +1,269 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Int64Observable describes a set of instruments used asynchronously to record ++// int64 measurements once per collection cycle. Observations of these ++// instruments are only made within a callback. ++// ++// Warning: Methods may be added to this interface in minor releases. ++type Int64Observable interface { ++ Observable ++ ++ int64Observable() ++} ++ ++// Int64ObservableCounter is an instrument used to asynchronously record ++// increasing int64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. The value observed is ++// assumed the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. 
See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableCounter ++ ++ Int64Observable ++} ++ ++// Int64ObservableCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableCounterConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] ++// with all opts applied. ++func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { ++ var config Int64ObservableCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableCounterOption applies options to a ++// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableCounterOption. ++type Int64ObservableCounterOption interface { ++ applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig ++} ++ ++// Int64ObservableUpDownCounter is an instrument used to asynchronously record ++// int64 measurements once per collection cycle. Observations are only made ++// within a callback for this instrument. The value observed is assumed the to ++// be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableUpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableUpDownCounter ++ ++ Int64Observable ++} ++ ++// Int64ObservableUpDownCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableUpDownCounterConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableUpDownCounterConfig returns a new ++// [Int64ObservableUpDownCounterConfig] with all opts applied. ++func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { ++ var config Int64ObservableUpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableUpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableUpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableUpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. 
++func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableUpDownCounterOption applies options to a ++// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableUpDownCounterOption. ++type Int64ObservableUpDownCounterOption interface { ++ applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig ++} ++ ++// Int64ObservableGauge is an instrument used to asynchronously record ++// instantaneous int64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableGauge interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableGauge ++ ++ Int64Observable ++} ++ ++// Int64ObservableGaugeConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableGaugeConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] ++// with all opts applied. ++func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { ++ var config Int64ObservableGaugeConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableGauge(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableGaugeConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableGaugeConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableGaugeOption applies options to a ++// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableGaugeOption. ++type Int64ObservableGaugeOption interface { ++ applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig ++} ++ ++// Int64Observer is a recorder of int64 measurements. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Observer ++ ++ // Observe records the int64 value. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Observe(value int64, options ...ObserveOption) ++} ++ ++// Int64Callback is a function registered with a Meter that makes observations ++// for an Int64Observerable instrument it is registered with. 
Calls to the ++// Int64Observer record measurement values for the Int64Observable. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. ++// ++// The function needs to make unique observations across all registered ++// Int64Callbacks. Meaning, it should not report measurements with the same ++// attributes as another Int64Callbacks also registered for the same ++// instrument. ++// ++// The function needs to be concurrent safe. ++type Int64Callback func(context.Context, Int64Observer) error ++ ++// Int64ObservableOption applies options to int64 Observer instruments. ++type Int64ObservableOption interface { ++ Int64ObservableCounterOption ++ Int64ObservableUpDownCounterOption ++ Int64ObservableGaugeOption ++} ++ ++type int64CallbackOpt struct { ++ cback Int64Callback ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++// WithInt64Callback adds callback to be called for an instrument. ++func WithInt64Callback(callback Int64Callback) Int64ObservableOption { ++ return int64CallbackOpt{callback} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go +index 621e4c5fcb8..778ad2d748b 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/config.go ++++ b/vendor/go.opentelemetry.io/otel/metric/config.go +@@ -14,17 +14,30 @@ + + package metric // import "go.opentelemetry.io/otel/metric" + ++import "go.opentelemetry.io/otel/attribute" ++ + // MeterConfig contains options for Meters. + type MeterConfig struct { + instrumentationVersion string + schemaURL string ++ attrs attribute.Set ++ ++ // Ensure forward compatibility by explicitly making this not comparable. ++ noCmp [0]func() //nolint: unused // This is indeed used. + } + +-// InstrumentationVersion is the version of the library providing instrumentation. ++// InstrumentationVersion returns the version of the library providing ++// instrumentation. + func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion + } + ++// InstrumentationAttributes returns the attributes associated with the library ++// providing instrumentation. ++func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { ++ return cfg.attrs ++} ++ + // SchemaURL is the schema_url of the library providing instrumentation. + func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +@@ -60,6 +73,16 @@ func WithInstrumentationVersion(version string) MeterOption { + }) + } + ++// WithInstrumentationAttributes sets the instrumentation attributes. ++// ++// The passed attributes will be de-duplicated. ++func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { ++ return meterOptionFunc(func(config MeterConfig) MeterConfig { ++ config.attrs = attribute.NewSet(attr...) ++ return config ++ }) ++} ++ + // WithSchemaURL sets the schema URL. 
+ func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { +diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go +index bd6f4343720..54716e13b35 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/doc.go ++++ b/vendor/go.opentelemetry.io/otel/metric/doc.go +@@ -13,11 +13,158 @@ + // limitations under the License. + + /* +-Package metric provides an implementation of the metrics part of the +-OpenTelemetry API. ++Package metric provides the OpenTelemetry API used to measure metrics about ++source code operation. + +-This package is currently in a pre-GA phase. Backwards incompatible changes +-may be introduced in subsequent minor version releases as we work to track the +-evolving OpenTelemetry specification and user feedback. ++This API is separate from its implementation so the instrumentation built from ++it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official ++OpenTelemetry implementation of this API. ++ ++All measurements made with this package are made via instruments. These ++instruments are created by a [Meter] which itself is created by a ++[MeterProvider]. Applications need to accept a [MeterProvider] implementation ++as a starting point when instrumenting. This can be done directly, or by using ++the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an ++appropriately named [Meter] from the accepted [MeterProvider], instrumentation ++can then be built from the [Meter]'s instruments. ++ ++# Instruments ++ ++Each instrument is designed to make measurements of a particular type. Broadly, ++all instruments fall into two overlapping logical categories: asynchronous or ++synchronous, and int64 or float64. ++ ++All synchronous instruments ([Int64Counter], [Int64UpDownCounter], ++[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and ++[Float64Histogram]) are used to measure the operation and performance of source ++code during the source code execution. These instruments only make measurements ++when the source code they instrument is run. ++ ++All asynchronous instruments ([Int64ObservableCounter], ++[Int64ObservableUpDownCounter], [Int64ObservableGauge], ++[Float64ObservableCounter], [Float64ObservableUpDownCounter], and ++[Float64ObservableGauge]) are used to measure metrics outside of the execution ++of source code. They are said to make "observations" via a callback function ++called once every measurement collection cycle. ++ ++Each instrument is also grouped by the value type it measures. Either int64 or ++float64. The value being measured will dictate which instrument in these ++categories to use. ++ ++Outside of these two broad categories, instruments are described by the ++function they are designed to serve. All Counters ([Int64Counter], ++[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are ++designed to measure values that never decrease in value, but instead only ++incrementally increase in value. UpDownCounters ([Int64UpDownCounter], ++[Float64UpDownCounter], [Int64ObservableUpDownCounter], and ++[Float64ObservableUpDownCounter]) on the other hand, are designed to measure ++values that can increase and decrease. When more information needs to be ++conveyed about all the synchronous measurements made during a collection cycle, ++a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. 
Finally, ++when just the most recent measurement needs to be conveyed about an ++asynchronous measurement, a Gauge ([Int64ObservableGauge] and ++[Float64ObservableGauge]) should be used. ++ ++See the [OpenTelemetry documentation] for more information about instruments ++and their intended use. ++ ++# Measurements ++ ++Measurements are made by recording values and information about the values with ++an instrument. How these measurements are recorded depends on the instrument. ++ ++Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], ++[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and ++[Float64Histogram]) are recorded using the instrument methods directly. All ++counter instruments have an Add method that is used to measure an increment ++value, and all histogram instruments have a Record method to measure a data ++point. ++ ++Asynchronous instruments ([Int64ObservableCounter], ++[Int64ObservableUpDownCounter], [Int64ObservableGauge], ++[Float64ObservableCounter], [Float64ObservableUpDownCounter], and ++[Float64ObservableGauge]) record measurements within a callback function. The ++callback is registered with the Meter which ensures the callback is called once ++per collection cycle. A callback can be registered two ways: during the ++instrument's creation using an option, or later using the RegisterCallback ++method of the [Meter] that created the instrument. ++ ++If the following criteria are met, an option ([WithInt64Callback] or ++[WithFloat64Callback]) can be used during the asynchronous instrument's ++creation to register a callback ([Int64Callback] or [Float64Callback], ++respectively): ++ ++ - The measurement process is known when the instrument is created ++ - Only that instrument will make a measurement within the callback ++ - The callback never needs to be unregistered ++ ++If the criteria are not met, use the RegisterCallback method of the [Meter] that ++created the instrument to register a [Callback]. ++ ++# API Implementations ++ ++This package does not conform to the standard Go versioning policy, all of its ++interfaces may have methods added to them without a package major version bump. ++This non-standard API evolution could surprise an uninformed implementation ++author. They could unknowingly build their implementation in a way that would ++result in a runtime panic for their users that update to the new API. ++ ++The API is designed to help inform an instrumentation author about this ++non-standard API evolution. It requires them to choose a default behavior for ++unimplemented interface methods. There are three behavior choices they can ++make: ++ ++ - Compilation failure ++ - Panic ++ - Default to another implementation ++ ++All interfaces in this API embed a corresponding interface from ++[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default ++behavior of their implementations to be a compilation failure, signaling to ++their users they need to update to the latest version of that implementation, ++they need to embed the corresponding interface from ++[go.opentelemetry.io/otel/metric/embedded] in their implementation. For ++example, ++ ++ import "go.opentelemetry.io/otel/metric/embedded" ++ ++ type MeterProvider struct { ++ embedded.MeterProvider ++ // ... ++ } ++ ++If an author wants the default behavior of their implementations to a panic, ++they need to embed the API interface directly. ++ ++ import "go.opentelemetry.io/otel/metric" ++ ++ type MeterProvider struct { ++ metric.MeterProvider ++ // ... 
++ } ++ ++This is not a recommended behavior as it could lead to publishing packages that ++contain runtime panics when users update other package that use newer versions ++of [go.opentelemetry.io/otel/metric]. ++ ++Finally, an author can embed another implementation in theirs. The embedded ++implementation will be used for methods not defined by the author. For example, ++an author who wants to default to silently dropping the call can use ++[go.opentelemetry.io/otel/metric/noop]: ++ ++ import "go.opentelemetry.io/otel/metric/noop" ++ ++ type MeterProvider struct { ++ noop.MeterProvider ++ // ... ++ } ++ ++It is strongly recommended that authors only embed ++[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. ++That implementation is the only one OpenTelemetry authors can guarantee will ++fully implement all the API interfaces when a user updates their API. ++ ++[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ ++[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider + */ + package metric // import "go.opentelemetry.io/otel/metric" +diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +new file mode 100644 +index 00000000000..ae0bdbd2e64 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +@@ -0,0 +1,234 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package embedded provides interfaces embedded within the [OpenTelemetry ++// metric API]. ++// ++// Implementers of the [OpenTelemetry metric API] can embed the relevant type ++// from this package into their implementation directly. Doing so will result ++// in a compilation error for users when the [OpenTelemetry metric API] is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++// ++// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric ++package embedded // import "go.opentelemetry.io/otel/metric/embedded" ++ ++// MeterProvider is embedded in ++// [go.opentelemetry.io/otel/metric.MeterProvider]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type MeterProvider interface{ meterProvider() } ++ ++// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Meter interface{ meter() } ++ ++// Float64Observer is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Observer] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Observer] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Observer interface{ float64Observer() } ++ ++// Int64Observer is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users ++// to experience a compilation error, signaling they need to update to your ++// latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Observer] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64Observer interface{ int64Observer() } ++ ++// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Observer] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Observer interface{ observer() } ++ ++// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Registration] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Registration] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Registration interface{ registration() } ++ ++// Float64Counter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Counter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Counter] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Counter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Counter interface{ float64Counter() } ++ ++// Float64Histogram is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Histogram interface{ float64Histogram() } ++ ++// Float64ObservableCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableCounter interface{ float64ObservableCounter() } ++ ++// Float64ObservableGauge is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableGauge interface{ float64ObservableGauge() } ++ ++// Float64ObservableUpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] ++// if you want users to experience a compilation error, signaling they need to ++// update to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } ++ ++// Float64UpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Float64UpDownCounter interface{ float64UpDownCounter() } ++ ++// Int64Counter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Counter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users ++// to experience a compilation error, signaling they need to update to your ++// latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Counter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). 
++type Int64Counter interface{ int64Counter() } ++ ++// Int64Histogram is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Histogram]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64Histogram interface{ int64Histogram() } ++ ++// Int64ObservableCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Int64ObservableCounter interface{ int64ObservableCounter() } ++ ++// Int64ObservableGauge is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Int64ObservableGauge interface{ int64ObservableGauge() } ++ ++// Int64ObservableUpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if ++// you want users to experience a compilation error, signaling they need to ++// update to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } ++ ++// Int64UpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64UpDownCounter interface{ int64UpDownCounter() } +diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go +deleted file mode 100644 +index 05a67c2e999..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/global/global.go ++++ /dev/null +@@ -1,42 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. 
+-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/global" +- +-import ( +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/internal/global" +-) +- +-// Meter returns a Meter from the global MeterProvider. The +-// instrumentationName must be the name of the library providing +-// instrumentation. This name may be the same as the instrumented code only if +-// that code provides built-in instrumentation. If the instrumentationName is +-// empty, then a implementation defined default name will be used instead. +-// +-// This is short for MeterProvider().Meter(name). +-func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { +- return MeterProvider().Meter(instrumentationName, opts...) +-} +- +-// MeterProvider returns the registered global trace provider. +-// If none is registered then a No-op MeterProvider is returned. +-func MeterProvider() metric.MeterProvider { +- return global.MeterProvider() +-} +- +-// SetMeterProvider registers `mp` as the global meter provider. +-func SetMeterProvider(mp metric.MeterProvider) { +- global.SetMeterProvider(mp) +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go +new file mode 100644 +index 00000000000..be89cd53341 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go +@@ -0,0 +1,357 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// Observable is used as a grouping mechanism for all instruments that are ++// updated within a Callback. ++type Observable interface { ++ observable() ++} ++ ++// InstrumentOption applies options to all instruments. ++type InstrumentOption interface { ++ Int64CounterOption ++ Int64UpDownCounterOption ++ Int64HistogramOption ++ Int64ObservableCounterOption ++ Int64ObservableUpDownCounterOption ++ Int64ObservableGaugeOption ++ ++ Float64CounterOption ++ Float64UpDownCounterOption ++ Float64HistogramOption ++ Float64ObservableCounterOption ++ Float64ObservableUpDownCounterOption ++ Float64ObservableGaugeOption ++} ++ ++// HistogramOption applies options to histogram instruments. 
++type HistogramOption interface { ++ Int64HistogramOption ++ Float64HistogramOption ++} ++ ++type descOpt string ++ ++func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ c.description = string(o) ++ return c ++} ++ ++// WithDescription sets the instrument description. 
++func WithDescription(desc string) InstrumentOption { return descOpt(desc) } ++ ++type unitOpt string ++ ++func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ c.unit = string(o) ++ return c ++} ++ ++// WithUnit sets the instrument unit. ++// ++// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. ++func WithUnit(u string) InstrumentOption { return unitOpt(u) } ++ ++// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. ++// ++// This option is considered "advisory", and may be ignored by API implementations. ++func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } ++ ++type bucketOpt []float64 ++ ++func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.explicitBucketBoundaries = o ++ return c ++} ++ ++func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.explicitBucketBoundaries = o ++ return c ++} ++ ++// AddOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as an AddOption. ++type AddOption interface { ++ applyAdd(AddConfig) AddConfig ++} ++ ++// AddConfig contains options for an addition measurement. ++type AddConfig struct { ++ attrs attribute.Set ++} ++ ++// NewAddConfig returns a new [AddConfig] with all opts applied. ++func NewAddConfig(opts []AddOption) AddConfig { ++ config := AddConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyAdd(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c AddConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// RecordOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as a RecordOption. 
++type RecordOption interface { ++ applyRecord(RecordConfig) RecordConfig ++} ++ ++// RecordConfig contains options for a recorded measurement. ++type RecordConfig struct { ++ attrs attribute.Set ++} ++ ++// NewRecordConfig returns a new [RecordConfig] with all opts applied. ++func NewRecordConfig(opts []RecordOption) RecordConfig { ++ config := RecordConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyRecord(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c RecordConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// ObserveOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as a ObserveOption. ++type ObserveOption interface { ++ applyObserve(ObserveConfig) ObserveConfig ++} ++ ++// ObserveConfig contains options for an observed measurement. ++type ObserveConfig struct { ++ attrs attribute.Set ++} ++ ++// NewObserveConfig returns a new [ObserveConfig] with all opts applied. ++func NewObserveConfig(opts []ObserveOption) ObserveConfig { ++ config := ObserveConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyObserve(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c ObserveConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// MeasurementOption applies options to all instrument measurement. ++type MeasurementOption interface { ++ AddOption ++ RecordOption ++ ObserveOption ++} ++ ++type attrOpt struct { ++ set attribute.Set ++} ++ ++// mergeSets returns the union of keys between a and b. Any duplicate keys will ++// use the value associated with b. ++func mergeSets(a, b attribute.Set) attribute.Set { ++ // NewMergeIterator uses the first value for any duplicates. ++ iter := attribute.NewMergeIterator(&b, &a) ++ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) ++ for iter.Next() { ++ merged = append(merged, iter.Attribute()) ++ } ++ return attribute.NewSet(merged...) ++} ++ ++func (o attrOpt) applyAdd(c AddConfig) AddConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++// WithAttributeSet sets the attribute Set associated with a measurement is ++// made with. ++// ++// If multiple WithAttributeSet or WithAttributes options are passed the ++// attributes will be merged together in the order they are passed. Attributes ++// with duplicate keys will use the last value passed. ++func WithAttributeSet(attributes attribute.Set) MeasurementOption { ++ return attrOpt{set: attributes} ++} ++ ++// WithAttributes converts attributes into an attribute Set and sets the Set to ++// be associated with a measurement. 
This is shorthand for: ++// ++// cp := make([]attribute.KeyValue, len(attributes)) ++// copy(cp, attributes) ++// WithAttributes(attribute.NewSet(cp...)) ++// ++// [attribute.NewSet] may modify the passed attributes so this will make a copy ++// of attributes before creating a set in order to ensure this function is ++// concurrent safe. This makes this option function less optimized in ++// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be ++// preferred for performance sensitive code. ++// ++// See [WithAttributeSet] for information about how multiple WithAttributes are ++// merged. ++func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { ++ cp := make([]attribute.KeyValue, len(attributes)) ++ copy(cp, attributes) ++ return attrOpt{set: attribute.NewSet(cp...)} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go +deleted file mode 100644 +index 370715f694c..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go ++++ /dev/null +@@ -1,70 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- +- // Gauge creates an instrument for recording the current value. +- Gauge(name string, opts ...instrument.Option) (Gauge, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. 
+- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// Gauge is an instrument that records independent readings. +-type Gauge interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go +deleted file mode 100644 +index 41a561bc4a2..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go ++++ /dev/null +@@ -1,70 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- +- // Gauge creates an instrument for recording the current value. +- Gauge(name string, opts ...instrument.Option) (Gauge, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// Gauge is an instrument that records independent readings. +-type Gauge interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. 
If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/config.go b/vendor/go.opentelemetry.io/otel/metric/instrument/config.go +deleted file mode 100644 +index 8778bce1619..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/config.go ++++ /dev/null +@@ -1,69 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package instrument // import "go.opentelemetry.io/otel/metric/instrument" +- +-import "go.opentelemetry.io/otel/metric/unit" +- +-// Config contains options for metric instrument descriptors. +-type Config struct { +- description string +- unit unit.Unit +-} +- +-// Description describes the instrument in human-readable terms. +-func (cfg Config) Description() string { +- return cfg.description +-} +- +-// Unit describes the measurement unit for an instrument. +-func (cfg Config) Unit() unit.Unit { +- return cfg.unit +-} +- +-// Option is an interface for applying metric instrument options. +-type Option interface { +- applyInstrument(Config) Config +-} +- +-// NewConfig creates a new Config and applies all the given options. +-func NewConfig(opts ...Option) Config { +- var config Config +- for _, o := range opts { +- config = o.applyInstrument(config) +- } +- return config +-} +- +-type optionFunc func(Config) Config +- +-func (fn optionFunc) applyInstrument(cfg Config) Config { +- return fn(cfg) +-} +- +-// WithDescription applies provided description. +-func WithDescription(desc string) Option { +- return optionFunc(func(cfg Config) Config { +- cfg.description = desc +- return cfg +- }) +-} +- +-// WithUnit applies provided unit. +-func WithUnit(u unit.Unit) Option { +- return optionFunc(func(cfg Config) Config { +- cfg.unit = u +- return cfg +- }) +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go +deleted file mode 100644 +index 435db1127bc..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go ++++ /dev/null +@@ -1,56 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. 
+- +-package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- // Histogram creates an instrument for recording a distribution of values. +- Histogram(name string, opts ...instrument.Option) (Histogram, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// Histogram is an instrument that records a distribution of values. +-type Histogram interface { +- // Record adds an additional value to the distribution. +- Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go +deleted file mode 100644 +index c77a4672860..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go ++++ /dev/null +@@ -1,56 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- // Histogram creates an instrument for recording a distribution of values. +- Histogram(name string, opts ...instrument.Option) (Histogram, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Add records a change to the counter. 
+- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// Histogram is an instrument that records a distribution of values. +-type Histogram interface { +- // Record adds an additional value to the distribution. +- Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go +deleted file mode 100644 +index aed8b6660a5..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go ++++ /dev/null +@@ -1,360 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/internal/global" +- +-import ( +- "context" +- "sync/atomic" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-type afCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.Counter +- +- instrument.Asynchronous +-} +- +-func (i *afCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *afCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.Counter) +- } +- return nil +-} +- +-type afUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.UpDownCounter +- +- instrument.Asynchronous +-} +- +-func (i *afUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...) 
+- } +-} +- +-func (i *afUpDownCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.UpDownCounter) +- } +- return nil +-} +- +-type afGauge struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.Gauge +- +- instrument.Asynchronous +-} +- +-func (i *afGauge) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *afGauge) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.Gauge) +- } +- return nil +-} +- +-type aiCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.Counter +- +- instrument.Asynchronous +-} +- +-func (i *aiCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.Counter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.Counter) +- } +- return nil +-} +- +-type aiUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.UpDownCounter +- +- instrument.Asynchronous +-} +- +-func (i *aiUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiUpDownCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.UpDownCounter) +- } +- return nil +-} +- +-type aiGauge struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.Gauge +- +- instrument.Asynchronous +-} +- +-func (i *aiGauge) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiGauge) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.Gauge) +- } +- return nil +-} +- +-//Sync Instruments. +-type sfCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.Counter +- +- instrument.Synchronous +-} +- +-func (i *sfCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().Counter(i.name, i.opts...) 
+- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...) +- } +-} +- +-type sfUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.UpDownCounter +- +- instrument.Synchronous +-} +- +-func (i *sfUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...) +- } +-} +- +-type sfHistogram struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.Histogram +- +- instrument.Synchronous +-} +- +-func (i *sfHistogram) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...) +- } +-} +- +-type siCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.Counter +- +- instrument.Synchronous +-} +- +-func (i *siCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.Counter).Add(ctx, x, attrs...) +- } +-} +- +-type siUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.UpDownCounter +- +- instrument.Synchronous +-} +- +-func (i *siUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...) +- } +-} +- +-type siHistogram struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.Histogram +- +- instrument.Synchronous +-} +- +-func (i *siHistogram) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().Histogram(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.Histogram).Record(ctx, x, attrs...) 
+- } +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go +deleted file mode 100644 +index 0fa924f397c..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go ++++ /dev/null +@@ -1,347 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/internal/global" +- +-import ( +- "context" +- "sync" +- "sync/atomic" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-// meterProvider is a placeholder for a configured SDK MeterProvider. +-// +-// All MeterProvider functionality is forwarded to a delegate once +-// configured. +-type meterProvider struct { +- mtx sync.Mutex +- meters map[il]*meter +- +- delegate metric.MeterProvider +-} +- +-type il struct { +- name string +- version string +-} +- +-// setDelegate configures p to delegate all MeterProvider functionality to +-// provider. +-// +-// All Meters provided prior to this function call are switched out to be +-// Meters provided by provider. All instruments and callbacks are recreated and +-// delegated. +-// +-// It is guaranteed by the caller that this happens only once. +-func (p *meterProvider) setDelegate(provider metric.MeterProvider) { +- p.mtx.Lock() +- defer p.mtx.Unlock() +- +- p.delegate = provider +- +- if len(p.meters) == 0 { +- return +- } +- +- for _, meter := range p.meters { +- meter.setDelegate(provider) +- } +- +- p.meters = nil +-} +- +-// Meter implements MeterProvider. +-func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { +- p.mtx.Lock() +- defer p.mtx.Unlock() +- +- if p.delegate != nil { +- return p.delegate.Meter(name, opts...) +- } +- +- // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. +- +- c := metric.NewMeterConfig(opts...) +- key := il{ +- name: name, +- version: c.InstrumentationVersion(), +- } +- +- if p.meters == nil { +- p.meters = make(map[il]*meter) +- } +- +- if val, ok := p.meters[key]; ok { +- return val +- } +- +- t := &meter{name: name, opts: opts} +- p.meters[key] = t +- return t +-} +- +-// meter is a placeholder for a metric.Meter. +-// +-// All Meter functionality is forwarded to a delegate once configured. +-// Otherwise, all functionality is forwarded to a NoopMeter. 
+-type meter struct { +- name string +- opts []metric.MeterOption +- +- mtx sync.Mutex +- instruments []delegatedInstrument +- callbacks []delegatedCallback +- +- delegate atomic.Value // metric.Meter +-} +- +-type delegatedInstrument interface { +- setDelegate(metric.Meter) +-} +- +-// setDelegate configures m to delegate all Meter functionality to Meters +-// created by provider. +-// +-// All subsequent calls to the Meter methods will be passed to the delegate. +-// +-// It is guaranteed by the caller that this happens only once. +-func (m *meter) setDelegate(provider metric.MeterProvider) { +- meter := provider.Meter(m.name, m.opts...) +- m.delegate.Store(meter) +- +- m.mtx.Lock() +- defer m.mtx.Unlock() +- +- for _, inst := range m.instruments { +- inst.setDelegate(meter) +- } +- +- for _, callback := range m.callbacks { +- callback.setDelegate(meter) +- } +- +- m.instruments = nil +- m.callbacks = nil +-} +- +-// AsyncInt64 is the namespace for the Asynchronous Integer instruments. +-// +-// To Observe data with instruments it must be registered in a callback. +-func (m *meter) AsyncInt64() asyncint64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.AsyncInt64() +- } +- return (*aiInstProvider)(m) +-} +- +-// AsyncFloat64 is the namespace for the Asynchronous Float instruments. +-// +-// To Observe data with instruments it must be registered in a callback. +-func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.AsyncFloat64() +- } +- return (*afInstProvider)(m) +-} +- +-// RegisterCallback captures the function that will be called during Collect. +-// +-// It is only valid to call Observe within the scope of the passed function, +-// and only on the instruments that were registered with this call. +-func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- insts = unwrapInstruments(insts) +- return del.RegisterCallback(insts, function) +- } +- +- m.mtx.Lock() +- defer m.mtx.Unlock() +- m.callbacks = append(m.callbacks, delegatedCallback{ +- instruments: insts, +- function: function, +- }) +- +- return nil +-} +- +-type wrapped interface { +- unwrap() instrument.Asynchronous +-} +- +-func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous { +- out := make([]instrument.Asynchronous, 0, len(instruments)) +- +- for _, inst := range instruments { +- if in, ok := inst.(wrapped); ok { +- out = append(out, in.unwrap()) +- } else { +- out = append(out, inst) +- } +- } +- +- return out +-} +- +-// SyncInt64 is the namespace for the Synchronous Integer instruments. +-func (m *meter) SyncInt64() syncint64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.SyncInt64() +- } +- return (*siInstProvider)(m) +-} +- +-// SyncFloat64 is the namespace for the Synchronous Float instruments. 
+-func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.SyncFloat64() +- } +- return (*sfInstProvider)(m) +-} +- +-type delegatedCallback struct { +- instruments []instrument.Asynchronous +- function func(context.Context) +-} +- +-func (c *delegatedCallback) setDelegate(m metric.Meter) { +- insts := unwrapInstruments(c.instruments) +- err := m.RegisterCallback(insts, c.function) +- if err != nil { +- otel.Handle(err) +- } +-} +- +-type afInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Gauge creates an instrument for recording the current value. +-func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afGauge{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-type aiInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Gauge creates an instrument for recording the current value. +-func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiGauge{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-type sfInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &sfCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &sfUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Histogram creates an instrument for recording a distribution of values. 
+-func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
+- ip.mtx.Lock()
+- defer ip.mtx.Unlock()
+- ctr := &sfHistogram{name: name, opts: opts}
+- ip.instruments = append(ip.instruments, ctr)
+- return ctr, nil
+-}
+-
+-type siInstProvider meter
+-
+-// Counter creates an instrument for recording increasing values.
+-func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
+- ip.mtx.Lock()
+- defer ip.mtx.Unlock()
+- ctr := &siCounter{name: name, opts: opts}
+- ip.instruments = append(ip.instruments, ctr)
+- return ctr, nil
+-}
+-
+-// UpDownCounter creates an instrument for recording changes of a value.
+-func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
+- ip.mtx.Lock()
+- defer ip.mtx.Unlock()
+- ctr := &siUpDownCounter{name: name, opts: opts}
+- ip.instruments = append(ip.instruments, ctr)
+- return ctr, nil
+-}
+-
+-// Histogram creates an instrument for recording a distribution of values.
+-func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
+- ip.mtx.Lock()
+- defer ip.mtx.Unlock()
+- ctr := &siHistogram{name: name, opts: opts}
+- ip.instruments = append(ip.instruments, ctr)
+- return ctr, nil
+-}
+diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
+deleted file mode 100644
+index 47c0d787d8a..00000000000
+--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
++++ /dev/null
+@@ -1,68 +0,0 @@
+-// Copyright The OpenTelemetry Authors
+-//
+-// Licensed under the Apache License, Version 2.0 (the "License");
+-// you may not use this file except in compliance with the License.
+-// You may obtain a copy of the License at
+-//
+-// http://www.apache.org/licenses/LICENSE-2.0
+-//
+-// Unless required by applicable law or agreed to in writing, software
+-// distributed under the License is distributed on an "AS IS" BASIS,
+-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-// See the License for the specific language governing permissions and
+-// limitations under the License.
+-
+-package global // import "go.opentelemetry.io/otel/metric/internal/global"
+-
+-import (
+- "errors"
+- "sync"
+- "sync/atomic"
+-
+- "go.opentelemetry.io/otel/internal/global"
+- "go.opentelemetry.io/otel/metric"
+-)
+-
+-var (
+- globalMeterProvider = defaultMeterProvider()
+-
+- delegateMeterOnce sync.Once
+-)
+-
+-type meterProviderHolder struct {
+- mp metric.MeterProvider
+-}
+-
+-// MeterProvider is the internal implementation for global.MeterProvider.
+-func MeterProvider() metric.MeterProvider {
+- return globalMeterProvider.Load().(meterProviderHolder).mp
+-}
+-
+-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+-func SetMeterProvider(mp metric.MeterProvider) {
+- current := MeterProvider()
+- if _, cOk := current.(*meterProvider); cOk {
+- if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+- // Do not assign the default delegating MeterProvider to delegate
+- // to itself.
+- global.Error(
+- errors.New("no delegate configured in meter provider"),
+- "Setting meter provider to it's current value. 
No delegate will be configured", +- ) +- return +- } +- } +- +- delegateMeterOnce.Do(func() { +- if def, ok := current.(*meterProvider); ok { +- def.setDelegate(mp) +- } +- }) +- globalMeterProvider.Store(meterProviderHolder{mp: mp}) +-} +- +-func defaultMeterProvider() *atomic.Value { +- v := &atomic.Value{} +- v.Store(meterProviderHolder{mp: &meterProvider{}}) +- return v +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go +index 21fc1c499fb..2520bc74af1 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/meter.go ++++ b/vendor/go.opentelemetry.io/otel/metric/meter.go +@@ -17,44 +17,196 @@ package metric // import "go.opentelemetry.io/otel/metric" + import ( + "context" + +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" ++ "go.opentelemetry.io/otel/metric/embedded" + ) + + // MeterProvider provides access to named Meter instances, for instrumenting +-// an application or library. ++// an application or package. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type MeterProvider interface { +- // Meter creates an instance of a `Meter` interface. The instrumentationName +- // must be the name of the library providing instrumentation. This name may +- // be the same as the instrumented code only if that code provides built-in +- // instrumentation. If the instrumentationName is empty, then a +- // implementation defined default name will be used instead. +- Meter(instrumentationName string, opts ...MeterOption) Meter ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.MeterProvider ++ ++ // Meter returns a new Meter with the provided name and configuration. ++ // ++ // A Meter should be scoped at most to a single package. The name needs to ++ // be unique so it does not collide with other names used by ++ // an application, nor other applications. To achieve this, the import path ++ // of the instrumentation package is recommended to be used as name. ++ // ++ // If the name is empty, then an implementation defined default name will ++ // be used instead. ++ Meter(name string, opts ...MeterOption) Meter + } + + // Meter provides access to instrument instances for recording metrics. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Meter interface { +- // AsyncInt64 is the namespace for the Asynchronous Integer instruments. ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Meter ++ ++ // Int64Counter returns a new Int64Counter instrument identified by name ++ // and configured with options. The instrument is used to synchronously ++ // record increasing int64 measurements during a computational operation. 
++ Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) ++ // Int64UpDownCounter returns a new Int64UpDownCounter instrument ++ // identified by name and configured with options. The instrument is used ++ // to synchronously record int64 measurements during a computational ++ // operation. ++ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) ++ // Int64Histogram returns a new Int64Histogram instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record the distribution of int64 measurements during a ++ // computational operation. ++ Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) ++ // Int64ObservableCounter returns a new Int64ObservableCounter identified ++ // by name and configured with options. The instrument is used to ++ // asynchronously record increasing int64 measurements once per a ++ // measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) ++ // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter ++ // instrument identified by name and configured with options. The ++ // instrument is used to asynchronously record int64 measurements once per ++ // a measurement collection cycle. + // +- // To Observe data with instruments it must be registered in a callback. +- AsyncInt64() asyncint64.InstrumentProvider ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) ++ // Int64ObservableGauge returns a new Int64ObservableGauge instrument ++ // identified by name and configured with options. The instrument is used ++ // to asynchronously record instantaneous int64 measurements once per a ++ // measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + +- // AsyncFloat64 is the namespace for the Asynchronous Float instruments ++ // Float64Counter returns a new Float64Counter instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record increasing float64 measurements during a ++ // computational operation. ++ Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) ++ // Float64UpDownCounter returns a new Float64UpDownCounter instrument ++ // identified by name and configured with options. 
The instrument is used ++ // to synchronously record float64 measurements during a computational ++ // operation. ++ Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) ++ // Float64Histogram returns a new Float64Histogram instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record the distribution of float64 measurements during a ++ // computational operation. ++ Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) ++ // Float64ObservableCounter returns a new Float64ObservableCounter ++ // instrument identified by name and configured with options. The ++ // instrument is used to asynchronously record increasing float64 ++ // measurements once per a measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) ++ // Float64ObservableUpDownCounter returns a new ++ // Float64ObservableUpDownCounter instrument identified by name and ++ // configured with options. The instrument is used to asynchronously record ++ // float64 measurements once per a measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) ++ // Float64ObservableGauge returns a new Float64ObservableGauge instrument ++ // identified by name and configured with options. The instrument is used ++ // to asynchronously record instantaneous float64 measurements once per a ++ // measurement collection cycle. + // +- // To Observe data with instruments it must be registered in a callback. +- AsyncFloat64() asyncfloat64.InstrumentProvider ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + +- // RegisterCallback captures the function that will be called during Collect. ++ // RegisterCallback registers f to be called during the collection of a ++ // measurement cycle. ++ // ++ // If Unregister of the returned Registration is called, f needs to be ++ // unregistered and not called during collection. ++ // ++ // The instruments f is registered with are the only instruments that f may ++ // observe values for. ++ // ++ // If no instruments are passed, f should not be registered nor called ++ // during collection. + // +- // It is only valid to call Observe within the scope of the passed function, +- // and only on the instruments that were registered with this call. 
+- RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error ++ // The function f needs to be concurrent safe. ++ RegisterCallback(f Callback, instruments ...Observable) (Registration, error) ++} ++ ++// Callback is a function registered with a Meter that makes observations for ++// the set of instruments it is registered with. The Observer parameter is used ++// to record measurement observations for these instruments. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. ++// ++// The function needs to make unique observations across all registered ++// Callbacks. Meaning, it should not report measurements for an instrument with ++// the same attributes as another Callback will report. ++// ++// The function needs to be concurrent safe. ++type Callback func(context.Context, Observer) error ++ ++// Observer records measurements for multiple instruments in a Callback. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Observer ++ ++ // ObserveFloat64 records the float64 value for obsrv. ++ ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) ++ // ObserveInt64 records the int64 value for obsrv. ++ ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) ++} + +- // SyncInt64 is the namespace for the Synchronous Integer instruments +- SyncInt64() syncint64.InstrumentProvider +- // SyncFloat64 is the namespace for the Synchronous Float instruments +- SyncFloat64() syncfloat64.InstrumentProvider ++// Registration is an token representing the unique registration of a callback ++// for a set of instruments with a Meter. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Registration interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Registration ++ ++ // Unregister removes the callback registration from a Meter. ++ // ++ // This method needs to be idempotent and concurrent safe. ++ Unregister() error + } +diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go +deleted file mode 100644 +index e8b9a9a1458..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/noop.go ++++ /dev/null +@@ -1,181 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package metric // import "go.opentelemetry.io/otel/metric" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. +-func NewNoopMeterProvider() MeterProvider { +- return noopMeterProvider{} +-} +- +-type noopMeterProvider struct{} +- +-func (noopMeterProvider) Meter(string, ...MeterOption) Meter { +- return noopMeter{} +-} +- +-// NewNoopMeter creates a Meter that does not record any metrics. +-func NewNoopMeter() Meter { +- return noopMeter{} +-} +- +-type noopMeter struct{} +- +-// AsyncInt64 creates an instrument that does not record any metrics. +-func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider { +- return nonrecordingAsyncInt64Instrument{} +-} +- +-// AsyncFloat64 creates an instrument that does not record any metrics. +-func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider { +- return nonrecordingAsyncFloat64Instrument{} +-} +- +-// SyncInt64 creates an instrument that does not record any metrics. +-func (noopMeter) SyncInt64() syncint64.InstrumentProvider { +- return nonrecordingSyncInt64Instrument{} +-} +- +-// SyncFloat64 creates an instrument that does not record any metrics. +-func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider { +- return nonrecordingSyncFloat64Instrument{} +-} +- +-// RegisterCallback creates a register callback that does not record any metrics. 
+-func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error { +- return nil +-} +- +-type nonrecordingAsyncFloat64Instrument struct { +- instrument.Asynchronous +-} +- +-var ( +- _ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{} +-) +- +-func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) { +- return n, nil +-} +- +-func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-type nonrecordingAsyncInt64Instrument struct { +- instrument.Asynchronous +-} +- +-var ( +- _ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.Counter = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{} +-) +- +-func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) { +- return n, nil +-} +- +-func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) { +-} +- +-type nonrecordingSyncFloat64Instrument struct { +- instrument.Synchronous +-} +- +-var ( +- _ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{} +-) +- +-func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) { +- return n, nil +-} +- +-func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-type nonrecordingSyncInt64Instrument struct { +- instrument.Synchronous +-} +- +-var ( +- _ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{} +- _ syncint64.Counter = nonrecordingSyncInt64Instrument{} +- _ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{} +- _ syncint64.Histogram = nonrecordingSyncInt64Instrument{} +-) +- +-func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) { +- return n, nil 
+-} +- +-func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) { +- return n, nil +-} +- +-func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) { +-} +-func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) { +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +new file mode 100644 +index 00000000000..0a4825ae6a7 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +@@ -0,0 +1,185 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Float64Counter is an instrument that records increasing float64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Counter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Counter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr float64, options ...AddOption) ++} ++ ++// Float64CounterConfig contains options for synchronous counter instruments that ++// record int64 values. ++type Float64CounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts ++// applied. ++func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { ++ var config Float64CounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64Counter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64CounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64CounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Float64CounterOption applies options to a [Float64CounterConfig]. See ++// [InstrumentOption] for other options that can be used as a ++// Float64CounterOption. ++type Float64CounterOption interface { ++ applyFloat64Counter(Float64CounterConfig) Float64CounterConfig ++} ++ ++// Float64UpDownCounter is an instrument that records increasing or decreasing ++// float64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. 
++type Float64UpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64UpDownCounter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr float64, options ...AddOption) ++} ++ ++// Float64UpDownCounterConfig contains options for synchronous counter ++// instruments that record int64 values. ++type Float64UpDownCounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] ++// with all opts applied. ++func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { ++ var config Float64UpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64UpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64UpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64UpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Float64UpDownCounterOption applies options to a ++// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that ++// can be used as a Float64UpDownCounterOption. ++type Float64UpDownCounterOption interface { ++ applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig ++} ++ ++// Float64Histogram is an instrument that records a distribution of float64 ++// values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Histogram interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Histogram ++ ++ // Record adds an additional value to the distribution. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Record(ctx context.Context, incr float64, options ...RecordOption) ++} ++ ++// Float64HistogramConfig contains options for synchronous counter instruments ++// that record int64 values. ++type Float64HistogramConfig struct { ++ description string ++ unit string ++ explicitBucketBoundaries []float64 ++} ++ ++// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all ++// opts applied. ++func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { ++ var config Float64HistogramConfig ++ for _, o := range opts { ++ config = o.applyFloat64Histogram(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64HistogramConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64HistogramConfig) Unit() string { ++ return c.unit ++} ++ ++// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
++func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { ++ return c.explicitBucketBoundaries ++} ++ ++// Float64HistogramOption applies options to a [Float64HistogramConfig]. See ++// [InstrumentOption] for other options that can be used as a ++// Float64HistogramOption. ++type Float64HistogramOption interface { ++ applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go +new file mode 100644 +index 00000000000..56667d32fc0 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go +@@ -0,0 +1,185 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Int64Counter is an instrument that records increasing int64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Counter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Counter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr int64, options ...AddOption) ++} ++ ++// Int64CounterConfig contains options for synchronous counter instruments that ++// record int64 values. ++type Int64CounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts ++// applied. ++func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { ++ var config Int64CounterConfig ++ for _, o := range opts { ++ config = o.applyInt64Counter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64CounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64CounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Int64CounterOption applies options to a [Int64CounterConfig]. See ++// [InstrumentOption] for other options that can be used as an ++// Int64CounterOption. ++type Int64CounterOption interface { ++ applyInt64Counter(Int64CounterConfig) Int64CounterConfig ++} ++ ++// Int64UpDownCounter is an instrument that records increasing or decreasing ++// int64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. 
See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64UpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64UpDownCounter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr int64, options ...AddOption) ++} ++ ++// Int64UpDownCounterConfig contains options for synchronous counter ++// instruments that record int64 values. ++type Int64UpDownCounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with ++// all opts applied. ++func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { ++ var config Int64UpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64UpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64UpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64UpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. ++// See [InstrumentOption] for other options that can be used as an ++// Int64UpDownCounterOption. ++type Int64UpDownCounterOption interface { ++ applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig ++} ++ ++// Int64Histogram is an instrument that records a distribution of int64 ++// values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Histogram interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Histogram ++ ++ // Record adds an additional value to the distribution. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Record(ctx context.Context, incr int64, options ...RecordOption) ++} ++ ++// Int64HistogramConfig contains options for synchronous counter instruments ++// that record int64 values. ++type Int64HistogramConfig struct { ++ description string ++ unit string ++ explicitBucketBoundaries []float64 ++} ++ ++// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts ++// applied. ++func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { ++ var config Int64HistogramConfig ++ for _, o := range opts { ++ config = o.applyInt64Histogram(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64HistogramConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64HistogramConfig) Unit() string { ++ return c.unit ++} ++ ++// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
++func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
++ return c.explicitBucketBoundaries
++}
++
++// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
++// [InstrumentOption] for other options that can be used as an
++// Int64HistogramOption.
++type Int64HistogramOption interface {
++ applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
++}
+diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+index 902692da082..75a8f3435a5 100644
+--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
++++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+@@ -40,8 +40,10 @@ const (
+ // their proprietary information.
+ type TraceContext struct{}
+ 
+-var _ TextMapPropagator = TraceContext{}
+-var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
++var (
++ _ TextMapPropagator = TraceContext{}
++ traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
++)
+ 
+ // Inject set tracecontext from the Context into the carrier.
+ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
+new file mode 100644
+index 00000000000..e0a43e13840
+--- /dev/null
++++ b/vendor/go.opentelemetry.io/otel/requirements.txt
+@@ -0,0 +1 @@
++codespell==2.2.6
+diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
+index 5e94b8ae521..59dcfab2501 100644
+--- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
+@@ -70,8 +70,8 @@ const (
+ // returned.
+ func firstInt(defaultValue int, keys ...string) int {
+ for _, key := range keys {
+- value, ok := os.LookupEnv(key)
+- if !ok {
++ value := os.Getenv(key)
++ if value == "" {
+ continue
+ }
+ 
+@@ -88,10 +88,10 @@ func firstInt(defaultValue int, keys ...string) int {
+ }
+ 
+ // IntEnvOr returns the int value of the environment variable with name key if
+-// it exists and the value is an int. Otherwise, defaultValue is returned.
++// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned.
+ func IntEnvOr(key string, defaultValue int) int {
+- value, ok := os.LookupEnv(key)
+- if !ok {
++ value := os.Getenv(key)
++ if value == "" {
+ return defaultValue
+ }
+ 
+diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
+new file mode 100644
+index 00000000000..bd84f624b45
+--- /dev/null
++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go
+@@ -0,0 +1,29 @@
++// Copyright The OpenTelemetry Authors
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++ ++package internal // import "go.opentelemetry.io/otel/sdk/internal" ++ ++//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go ++//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go ++//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go ++ ++//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go +diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +index 84a02306e64..dfeaaa8ca04 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +@@ -14,16 +14,7 @@ + + package internal // import "go.opentelemetry.io/otel/sdk/internal" + +-import ( +- "fmt" +- "time" +- +- "go.opentelemetry.io/otel" +-) +- +-// UserAgent is the user agent to be added to the outgoing +-// requests from the exporters. +-var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version()) ++import "time" + + // MonotonicEndTime returns the end time at present + // but offset from start, monotonically. +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +index c1d220408ae..4279013be88 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +@@ -18,14 +18,13 @@ import ( + "context" + "errors" + "fmt" ++ "strings" + ) + +-var ( +- // ErrPartialResource is returned by a detector when complete source +- // information for a Resource is unavailable or the source information +- // contains invalid values that are omitted from the returned Resource. +- ErrPartialResource = errors.New("partial resource") +-) ++// ErrPartialResource is returned by a detector when complete source ++// information for a Resource is unavailable or the source information ++// contains invalid values that are omitted from the returned Resource. ++var ErrPartialResource = errors.New("partial resource") + + // Detector detects OpenTelemetry resource information. 
+ type Detector interface { +@@ -45,28 +44,65 @@ type Detector interface { + // Detect calls all input detectors sequentially and merges each result with the previous one. + // It returns the merged error too. + func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { +- var autoDetectedRes *Resource +- var errInfo []string ++ r := new(Resource) ++ return r, detect(ctx, r, detectors) ++} ++ ++// detect runs all detectors using ctx and merges the result into res. This ++// assumes res is allocated and not nil, it will panic otherwise. ++func detect(ctx context.Context, res *Resource, detectors []Detector) error { ++ var ( ++ r *Resource ++ errs detectErrs ++ err error ++ ) ++ + for _, detector := range detectors { + if detector == nil { + continue + } +- res, err := detector.Detect(ctx) ++ r, err = detector.Detect(ctx) + if err != nil { +- errInfo = append(errInfo, err.Error()) ++ errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } +- autoDetectedRes, err = Merge(autoDetectedRes, res) ++ r, err = Merge(res, r) + if err != nil { +- errInfo = append(errInfo, err.Error()) ++ errs = append(errs, err) + } ++ *res = *r ++ } ++ ++ if len(errs) == 0 { ++ return nil ++ } ++ return errs ++} ++ ++type detectErrs []error ++ ++func (e detectErrs) Error() string { ++ errStr := make([]string, len(e)) ++ for i, err := range e { ++ errStr[i] = fmt.Sprintf("* %s", err) + } + +- var aggregatedError error +- if len(errInfo) > 0 { +- aggregatedError = fmt.Errorf("detecting resources: %s", errInfo) ++ format := "%d errors occurred detecting resource:\n\t%s" ++ return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) ++} ++ ++func (e detectErrs) Unwrap() error { ++ switch len(e) { ++ case 0: ++ return nil ++ case 1: ++ return e[0] + } +- return autoDetectedRes, aggregatedError ++ return e[1:] ++} ++ ++func (e detectErrs) Is(target error) bool { ++ return len(e) != 0 && errors.Is(e[0], target) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +index 7af46c61af0..c63a0dd1f8c 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +@@ -20,9 +20,9 @@ import ( + "os" + "path/filepath" + +- "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ "go.opentelemetry.io/otel/sdk" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type ( +@@ -60,9 +60,9 @@ var ( + func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, +- semconv.TelemetrySDKNameKey.String("opentelemetry"), +- semconv.TelemetrySDKLanguageKey.String("go"), +- semconv.TelemetrySDKVersionKey.String(otel.Version()), ++ semconv.TelemetrySDKName("opentelemetry"), ++ semconv.TelemetrySDKLanguageGo, ++ semconv.TelemetrySDKVersion(sdk.Version()), + ), nil + } + +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +index 8e212b12182..f263919f6ec 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +@@ -71,6 +71,11 @@ func WithHost() Option { + return WithDetectors(host{}) + } + ++// WithHostID adds host ID information to the configured resource. 
++func WithHostID() Option { ++ return WithDetectors(hostIDDetector{}) ++} ++ + // WithTelemetrySDK adds TelemetrySDK version info to the configured resource. + func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +@@ -194,6 +199,8 @@ func WithContainer() Option { + } + + // WithContainerID adds an attribute with the id of the container to the configured Resource. ++// Note: WithContainerID will not extract the correct container ID in an ECS environment. ++// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). + func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +index 7a897e96977..3d536228283 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +@@ -22,7 +22,7 @@ import ( + "os" + "regexp" + +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type containerIDProvider func() (string, error) +@@ -47,7 +47,7 @@ func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) + if containerID == "" { + return Empty(), nil + } +- return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil + } + + var ( +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +index 9aab3d83934..d55a50b0dc2 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +@@ -25,4 +25,7 @@ + // OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret + // the value as a list of comma delimited key/value pairs + // (e.g. `=,=,...`). ++// ++// While this package provides a stable API, ++// the attributes added by resource detectors may change. + package resource // import "go.opentelemetry.io/otel/sdk/resource" +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +index eb22d007922..e29ae563a69 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +@@ -17,25 +17,25 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" + import ( + "context" + "fmt" ++ "net/url" + "os" + "strings" + ++ "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. +- resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" ++ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" + ) + +-var ( +- // errMissingValue is returned when a resource value is missing. +- errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) +-) ++// errMissingValue is returned when a resource value is missing. 
++var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + + // fromEnv is a Detector that implements the Detector and collects + // resources from environment. This Detector is included as a +@@ -57,7 +57,7 @@ func (fromEnv) Detect(context.Context) (*Resource, error) { + var res *Resource + + if svcName != "" { +- res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) ++ res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) +@@ -80,16 +80,23 @@ func constructOTResources(s string) (*Resource, error) { + return Empty(), nil + } + pairs := strings.Split(s, ",") +- attrs := []attribute.KeyValue{} ++ var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { +- field := strings.SplitN(p, "=", 2) +- if len(field) != 2 { ++ k, v, found := strings.Cut(p, "=") ++ if !found { + invalid = append(invalid, p) + continue + } +- k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) +- attrs = append(attrs, attribute.String(k, v)) ++ key := strings.TrimSpace(k) ++ val, err := url.PathUnescape(strings.TrimSpace(v)) ++ if err != nil { ++ // Retain original value if decoding fails, otherwise it will be ++ // an empty string. ++ val = v ++ otel.Handle(err) ++ } ++ attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +new file mode 100644 +index 00000000000..fb1ebf2cab2 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +@@ -0,0 +1,120 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import ( ++ "context" ++ "errors" ++ "strings" ++ ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ++) ++ ++type hostIDProvider func() (string, error) ++ ++var defaultHostIDProvider hostIDProvider = platformHostIDReader.read ++ ++var hostID = defaultHostIDProvider ++ ++type hostIDReader interface { ++ read() (string, error) ++} ++ ++type fileReader func(string) (string, error) ++ ++type commandExecutor func(string, ...string) (string, error) ++ ++// hostIDReaderBSD implements hostIDReader. ++type hostIDReaderBSD struct { ++ execCommand commandExecutor ++ readFile fileReader ++} ++ ++// read attempts to read the machine-id from /etc/hostid. If not found it will ++// execute `kenv -q smbios.system.uuid`. If neither location yields an id an ++// error will be returned. ++func (r *hostIDReaderBSD) read() (string, error) { ++ if result, err := r.readFile("/etc/hostid"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ return "", errors.New("host id not found in: /etc/hostid or kenv") ++} ++ ++// hostIDReaderDarwin implements hostIDReader. 
++type hostIDReaderDarwin struct { ++ execCommand commandExecutor ++} ++ ++// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id ++// from the IOPlatformUUID line. If the command fails or the uuid cannot be ++// parsed an error will be returned. ++func (r *hostIDReaderDarwin) read() (string, error) { ++ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") ++ if err != nil { ++ return "", err ++ } ++ ++ lines := strings.Split(result, "\n") ++ for _, line := range lines { ++ if strings.Contains(line, "IOPlatformUUID") { ++ parts := strings.Split(line, " = ") ++ if len(parts) == 2 { ++ return strings.Trim(parts[1], "\""), nil ++ } ++ break ++ } ++ } ++ ++ return "", errors.New("could not parse IOPlatformUUID") ++} ++ ++type hostIDReaderLinux struct { ++ readFile fileReader ++} ++ ++// read attempts to read the machine-id from /etc/machine-id followed by ++// /var/lib/dbus/machine-id. If neither location yields an ID an error will ++// be returned. ++func (r *hostIDReaderLinux) read() (string, error) { ++ if result, err := r.readFile("/etc/machine-id"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") ++} ++ ++type hostIDDetector struct{} ++ ++// Detect returns a *Resource containing the platform specific host id. ++func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { ++ hostID, err := hostID() ++ if err != nil { ++ return nil, err ++ } ++ ++ return NewWithAttributes( ++ semconv.SchemaURL, ++ semconv.HostID(hostID), ++ ), nil ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +similarity index 54% +rename from vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go +rename to vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +index e1bbb850d76..1778bbacf05 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +@@ -12,19 +12,12 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package instrument // import "go.opentelemetry.io/otel/metric/instrument" ++//go:build dragonfly || freebsd || netbsd || openbsd || solaris ++// +build dragonfly freebsd netbsd openbsd solaris + +-// Asynchronous instruments are instruments that are updated within a Callback. +-// If an instrument is observed outside of it's callback it should be an error. +-// +-// This interface is used as a grouping mechanism. +-type Asynchronous interface { +- asynchronous() +-} ++package resource // import "go.opentelemetry.io/otel/sdk/resource" + +-// Synchronous instruments are updated in line with application code. +-// +-// This interface is used as a grouping mechanism. 
+-type Synchronous interface { +- synchronous() ++var platformHostIDReader hostIDReader = &hostIDReaderBSD{ ++ execCommand: execCommand, ++ readFile: readFile, + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +new file mode 100644 +index 00000000000..ba41409b23c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +@@ -0,0 +1,19 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ ++ execCommand: execCommand, ++} +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +similarity index 50% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go +rename to vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +index b3fd45d9d31..207acb0ed3a 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +@@ -12,23 +12,18 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-// Package internal contains common functionality for all OTLP exporters. +-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" ++//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +-import ( +- "fmt" +- "path" +- "strings" +-) ++package resource // import "go.opentelemetry.io/otel/sdk/resource" + +-// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead. +-func CleanPath(urlPath string, defaultPath string) string { +- tmp := path.Clean(strings.TrimSpace(urlPath)) +- if tmp == "." { +- return defaultPath +- } +- if !path.IsAbs(tmp) { +- tmp = fmt.Sprintf("/%s", tmp) ++import "os/exec" ++ ++func execCommand(name string, arg ...string) (string, error) { ++ cmd := exec.Command(name, arg...) ++ b, err := cmd.Output() ++ if err != nil { ++ return "", err + } +- return tmp ++ ++ return string(b), nil + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +new file mode 100644 +index 00000000000..410579b8fc9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +@@ -0,0 +1,22 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. 
++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build linux ++// +build linux ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++var platformHostIDReader hostIDReader = &hostIDReaderLinux{ ++ readFile: readFile, ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +new file mode 100644 +index 00000000000..721e3ca6e7d +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +@@ -0,0 +1,28 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import "os" ++ ++func readFile(filename string) (string, error) { ++ b, err := os.ReadFile(filename) ++ if err != nil { ++ return "", err ++ } ++ ++ return string(b), nil ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +new file mode 100644 +index 00000000000..89df9d6882e +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +@@ -0,0 +1,36 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// +build !darwin ++// +build !dragonfly ++// +build !freebsd ++// +build !linux ++// +build !netbsd ++// +build !openbsd ++// +build !solaris ++// +build !windows ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++// hostIDReaderUnsupported is a placeholder implementation for operating systems ++// for which this project currently doesn't support host.id ++// attribute detection. See build tags declaration early on this file ++// for a list of unsupported OSes. 
++type hostIDReaderUnsupported struct{} ++ ++func (*hostIDReaderUnsupported) read() (string, error) { ++ return "", nil ++} ++ ++var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +new file mode 100644 +index 00000000000..5b431c6ee6e +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +@@ -0,0 +1,48 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build windows ++// +build windows ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import ( ++ "golang.org/x/sys/windows/registry" ++) ++ ++// implements hostIDReader ++type hostIDReaderWindows struct{} ++ ++// read reads MachineGuid from the windows registry key: ++// SOFTWARE\Microsoft\Cryptography ++func (*hostIDReaderWindows) read() (string, error) { ++ k, err := registry.OpenKey( ++ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, ++ registry.QUERY_VALUE|registry.WOW64_64KEY, ++ ) ++ ++ if err != nil { ++ return "", err ++ } ++ defer k.Close() ++ ++ guid, _, err := k.GetStringValue("MachineGuid") ++ if err != nil { ++ return "", err ++ } ++ ++ return guid, nil ++} ++ ++var platformHostIDReader hostIDReader = &hostIDReaderWindows{} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +index 3b4d0c14dbd..0cbd559739c 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +@@ -19,7 +19,7 @@ import ( + "strings" + + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type osDescriptionProvider func() (string, error) +@@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider + } + +-type osTypeDetector struct{} +-type osDescriptionDetector struct{} ++type ( ++ osTypeDetector struct{} ++ osDescriptionDetector struct{} ++) + + // Detect returns a *Resource that describes the operating system type the + // service is running on. +@@ -56,14 +58,13 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + // service is running on. 
+ func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() +- + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, +- semconv.OSDescriptionKey.String(description), ++ semconv.OSDescription(description), + ), nil + } + +@@ -75,6 +76,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ ++ "aix": semconv.OSTypeAIX, + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, +@@ -83,6 +85,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, ++ "zos": semconv.OSTypeZOS, + } + + var osTypeAttribute attribute.KeyValue +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +index fba6790e445..c771942deec 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +@@ -85,14 +85,14 @@ func skip(line string) bool { + // parse attempts to split the provided line on the first '=' character, and then + // sanitize each side of the split before returning them as a key-value pair. + func parse(line string) (string, string, bool) { +- parts := strings.SplitN(line, "=", 2) ++ k, v, found := strings.Cut(line, "=") + +- if len(parts) != 2 || len(parts[0]) == 0 { ++ if !found || len(k) == 0 { + return "", "", false + } + +- key := strings.TrimSpace(parts[0]) +- value := unescape(unquote(strings.TrimSpace(parts[1]))) ++ key := strings.TrimSpace(k) ++ value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +index 9a169f663fb..ecdd11dd762 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +@@ -22,17 +22,19 @@ import ( + "path/filepath" + "runtime" + +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + +-type pidProvider func() int +-type executablePathProvider func() (string, error) +-type commandArgsProvider func() []string +-type ownerProvider func() (*user.User, error) +-type runtimeNameProvider func() string +-type runtimeVersionProvider func() string +-type runtimeOSProvider func() string +-type runtimeArchProvider func() string ++type ( ++ pidProvider func() int ++ executablePathProvider func() (string, error) ++ commandArgsProvider func() []string ++ ownerProvider func() (*user.User, error) ++ runtimeNameProvider func() string ++ runtimeVersionProvider func() string ++ runtimeOSProvider func() string ++ runtimeArchProvider func() string ++) + + var ( + defaultPidProvider pidProvider = os.Getpid +@@ -108,26 +110,28 @@ func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider + } + +-type processPIDDetector struct{} +-type processExecutableNameDetector struct{} +-type processExecutablePathDetector struct{} +-type processCommandArgsDetector struct{} +-type processOwnerDetector struct{} +-type processRuntimeNameDetector struct{} +-type processRuntimeVersionDetector 
struct{} +-type processRuntimeDescriptionDetector struct{} ++type ( ++ processPIDDetector struct{} ++ processExecutableNameDetector struct{} ++ processExecutablePathDetector struct{} ++ processCommandArgsDetector struct{} ++ processOwnerDetector struct{} ++ processRuntimeNameDetector struct{} ++ processRuntimeVersionDetector struct{} ++ processRuntimeDescriptionDetector struct{} ++) + + // Detect returns a *Resource that describes the process identifier (PID) of the + // executing process. + func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil + } + + // Detect returns a *Resource that describes the name of the process executable. + func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil + } + + // Detect returns a *Resource that describes the full path of the process executable. +@@ -137,13 +141,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err + return nil, err + } + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil + } + + // Detect returns a *Resource that describes all the command arguments as received + // by the process. + func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil + } + + // Detect returns a *Resource that describes the username of the user that owns the +@@ -154,18 +158,18 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + return nil, err + } + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil + } + + // Detect returns a *Resource that describes the name of the compiler used to compile + // this process image. + func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil + } + + // Detect returns a *Resource that describes the version of the runtime of this process. + func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil + } + + // Detect returns a *Resource that describes the runtime of this process. 
+@@ -175,6 +179,6 @@ func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, + + return NewWithAttributes( + semconv.SchemaURL, +- semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), ++ semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +index c425ff05db5..176ff106668 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +@@ -17,7 +17,6 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" + import ( + "context" + "errors" +- "fmt" + "sync" + + "go.opentelemetry.io/otel" +@@ -37,7 +36,6 @@ type Resource struct { + } + + var ( +- emptyResource Resource + defaultResource *Resource + defaultResourceOnce sync.Once + ) +@@ -51,17 +49,8 @@ func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg = opt.apply(cfg) + } + +- resource, err := Detect(ctx, cfg.detectors...) +- +- var err2 error +- resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL}) +- if err == nil { +- err = err2 +- } else if err2 != nil { +- err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) +- } +- +- return resource, err ++ r := &Resource{schemaURL: cfg.schemaURL} ++ return r, detect(ctx, r, cfg.detectors) + } + + // NewWithAttributes creates a resource from attrs and associates the resource with a +@@ -80,18 +69,18 @@ func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource + // of the attrs is known use NewWithAttributes instead. + func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { +- return &emptyResource ++ return &Resource{} + } + + // Ensure attributes comply with the specification: +- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes ++ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { +- return &emptyResource ++ return &Resource{} + } + + return &Resource{attrs: s} //nolint +@@ -164,7 +153,7 @@ func (r *Resource) Equal(eq *Resource) bool { + // if resource b's value is empty. + // + // The SchemaURL of the resources will be merged according to the spec rules: +-// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge ++// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge + // If the resources have different non-empty schemaURL an empty resource and an error + // will be returned. + func Merge(a, b *Resource) (*Resource, error) { +@@ -205,7 +194,7 @@ func Merge(a, b *Resource) (*Resource, error) { + // Empty returns an instance of Resource with no attributes. It is + // equivalent to a `nil` Resource. + func Empty() *Resource { +- return &emptyResource ++ return &Resource{} + } + + // Default returns an instance of Resource with a default +@@ -224,7 +213,7 @@ func Default() *Resource { + } + // If Detect did not return a valid resource, fall back to emptyResource. 
+ if defaultResource == nil { +- defaultResource = &emptyResource ++ defaultResource = &Resource{} + } + }) + return defaultResource +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +index a2d7db49001..c9c7effbf38 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +@@ -16,7 +16,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" + + import ( + "context" +- "runtime" + "sync" + "sync/atomic" + "time" +@@ -84,6 +83,7 @@ type batchSpanProcessor struct { + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} ++ stopped atomic.Bool + } + + var _ SpanProcessor = (*batchSpanProcessor)(nil) +@@ -91,7 +91,7 @@ var _ SpanProcessor = (*batchSpanProcessor)(nil) + // NewBatchSpanProcessor creates a new SpanProcessor that will send completed + // span batches to the exporter with the supplied options. + // +-// If the exporter is nil, the span processor will preform no action. ++// If the exporter is nil, the span processor will perform no action. + func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) +@@ -137,6 +137,11 @@ func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) + + // OnEnd method enqueues a ReadOnlySpan for later processing. + func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { ++ // Do not enqueue spans after Shutdown. ++ if bsp.stopped.Load() { ++ return ++ } ++ + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return +@@ -149,6 +154,7 @@ func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { ++ bsp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) +@@ -181,11 +187,24 @@ func (f forceFlushSpan) SpanContext() trace.SpanContext { + + // ForceFlush exports all ended spans that have not yet been exported. + func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { ++ // Interrupt if context is already canceled. ++ if err := ctx.Err(); err != nil { ++ return err ++ } ++ ++ // Do nothing after Shutdown. ++ if bsp.stopped.Load() { ++ return nil ++ } ++ + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { ++ case <-bsp.stopCh: ++ // The batchSpanProcessor is Shutdown. ++ return nil + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): +@@ -326,11 +345,9 @@ func (bsp *batchSpanProcessor) drainQueue() { + for { + select { + case sd := <-bsp.queue: +- if sd == nil { +- if err := bsp.exportSpans(ctx); err != nil { +- otel.Handle(err) +- } +- return ++ if _, ok := sd.(forceFlushSpan); ok { ++ // Ignore flush requests as they are not valid spans. ++ continue + } + + bsp.batchMutex.Lock() +@@ -344,7 +361,11 @@ func (bsp *batchSpanProcessor) drainQueue() { + } + } + default: +- close(bsp.queue) ++ // There are no more enqueued spans. Make final export. 
++ if err := bsp.exportSpans(ctx); err != nil { ++ otel.Handle(err) ++ } ++ return + } + } + } +@@ -358,34 +379,11 @@ func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + } + } + +-func recoverSendOnClosedChan() { +- x := recover() +- switch err := x.(type) { +- case nil: +- return +- case runtime.Error: +- if err.Error() == "send on closed channel" { +- return +- } +- } +- panic(x) +-} +- + func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + +- // This ensures the bsp.queue<- below does not panic as the +- // processor shuts down. +- defer recoverSendOnClosedChan() +- +- select { +- case <-bsp.stopCh: +- return false +- default: +- } +- + select { + case bsp.queue <- sd: + return true +@@ -399,16 +397,6 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) + return false + } + +- // This ensures the bsp.queue<- below does not panic as the +- // processor shuts down. +- defer recoverSendOnClosedChan() +- +- select { +- case <-bsp.stopCh: +- return false +- default: +- } +- + select { + case bsp.queue <- sd: + return true +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +index 292ea5481bc..7d46c4b48e5 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +@@ -25,6 +25,8 @@ import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" ++ "go.opentelemetry.io/otel/trace/noop" + ) + + const ( +@@ -73,9 +75,13 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} { + // TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to + // instrumentation so it can trace operational flow through a system. + type TracerProvider struct { ++ embedded.TracerProvider ++ + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer +- spanProcessors atomic.Value ++ spanProcessors atomic.Pointer[spanProcessorStates] ++ ++ isShutdown atomic.Bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. +@@ -116,12 +122,13 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + spanLimits: o.spanLimits, + resource: o.resource, + } +- + global.Info("TracerProvider created", "config", o) + ++ spss := make(spanProcessorStates, 0, len(o.processors)) + for _, sp := range o.processors { +- tp.RegisterSpanProcessor(sp) ++ spss = append(spss, newSpanProcessorState(sp)) + } ++ tp.spanProcessors.Store(&spss) + + return tp + } +@@ -134,10 +141,11 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + // + // This method is safe to be called concurrently. + func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { ++ // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). ++ if p.isShutdown.Load() { ++ return noop.NewTracerProvider().Tracer(name, opts...) ++ } + c := trace.NewTracerConfig(opts...) 
+- +- p.mu.Lock() +- defer p.mu.Unlock() + if name == "" { + name = defaultTracerName + } +@@ -146,57 +154,87 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } +- t, ok := p.namedTracer[is] +- if !ok { +- t = &tracer{ +- provider: p, +- instrumentationScope: is, ++ ++ t, ok := func() (trace.Tracer, bool) { ++ p.mu.Lock() ++ defer p.mu.Unlock() ++ // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran ++ // after the first check above but before we acquired the mutex. ++ if p.isShutdown.Load() { ++ return noop.NewTracerProvider().Tracer(name, opts...), true + } +- p.namedTracer[is] = t +- global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) ++ t, ok := p.namedTracer[is] ++ if !ok { ++ t = &tracer{ ++ provider: p, ++ instrumentationScope: is, ++ } ++ p.namedTracer[is] = t ++ } ++ return t, ok ++ }() ++ if !ok { ++ // This code is outside the mutex to not hold the lock while calling third party logging code: ++ // - That code may do slow things like I/O, which would prolong the duration the lock is held, ++ // slowing down all tracing consumers. ++ // - Logging code may be instrumented with tracing and deadlock because it could try ++ // acquiring the same non-reentrant mutex. ++ global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + } + return t + } + + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +-func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { ++func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { ++ // This check prevents calls during a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } + p.mu.Lock() + defer p.mu.Unlock() +- newSPS := spanProcessorStates{} +- if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { +- newSPS = append(newSPS, old...) +- } +- newSpanSync := &spanProcessorState{ +- sp: s, +- state: &sync.Once{}, ++ // This check prevents calls after a shutdown. ++ if p.isShutdown.Load() { ++ return + } +- newSPS = append(newSPS, newSpanSync) +- p.spanProcessors.Store(newSPS) ++ ++ current := p.getSpanProcessors() ++ newSPS := make(spanProcessorStates, 0, len(current)+1) ++ newSPS = append(newSPS, current...) ++ newSPS = append(newSPS, newSpanProcessorState(sp)) ++ p.spanProcessors.Store(&newSPS) + } + + // UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +-func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { ++func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { ++ // This check prevents calls during a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } + p.mu.Lock() + defer p.mu.Unlock() +- spss := spanProcessorStates{} +- old, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok || len(old) == 0 { ++ // This check prevents calls after a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } ++ old := p.getSpanProcessors() ++ if len(old) == 0 { + return + } +- spss = append(spss, old...) 
++ spss := make(spanProcessorStates, len(old)) ++ copy(spss, old) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { +- if sps.sp == s { ++ if sps.sp == sp { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { +- if err := s.Shutdown(context.Background()); err != nil { ++ if err := sp.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) +@@ -207,16 +245,13 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + +- p.spanProcessors.Store(spss) ++ p.spanProcessors.Store(&spss) + } + + // ForceFlush immediately exports all spans that have not yet been exported for + // all the registered span processors. + func (p *TracerProvider) ForceFlush(ctx context.Context) error { +- spss, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok { +- return fmt.Errorf("failed to load span processors") +- } ++ spss := p.getSpanProcessors() + if len(spss) == 0 { + return nil + } +@@ -235,14 +270,23 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { + return nil + } + +-// Shutdown shuts down the span processors in the order they were registered. ++// Shutdown shuts down TracerProvider. All registered span processors are shut down ++// in the order they were registered and any held computational resources are released. ++// After Shutdown is called, all methods are no-ops. + func (p *TracerProvider) Shutdown(ctx context.Context) error { +- spss, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok { +- return fmt.Errorf("failed to load span processors") ++ // This check prevents deadlocks in case of recursive shutdown. ++ if p.isShutdown.Load() { ++ return nil + } ++ p.mu.Lock() ++ defer p.mu.Unlock() ++ // This check prevents calls after a shutdown has already been done concurrently. ++ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? ++ return nil ++ } ++ + var retErr error +- for _, sps := range spss { ++ for _, sps := range p.getSpanProcessors() { + select { + case <-ctx.Done(): + return ctx.Err() +@@ -262,9 +306,14 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { + } + } + } ++ p.spanProcessors.Store(&spanProcessorStates{}) + return retErr + } + ++func (p *TracerProvider) getSpanProcessors() spanProcessorStates { ++ return *(p.spanProcessors.Load()) ++} ++ + // TracerProviderOption configures a TracerProvider. 
+ type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +index a6dcf4b307c..a7bc125b9e8 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +@@ -81,7 +81,7 @@ type traceIDRatioSampler struct { + + func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) +- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 ++ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, +@@ -158,15 +158,15 @@ func NeverSample() Sampler { + return alwaysOffSampler{} + } + +-// ParentBased returns a composite sampler which behaves differently, ++// ParentBased returns a sampler decorator which behaves differently, + // based on the parent of the span. If the span has no parent, +-// the root(Sampler) is used to make sampling decision. If the span has ++// the decorated sampler is used to make sampling decision. If the span has + // a parent, depending on whether the parent is remote and whether it + // is sampled, one of the following samplers will apply: +-// - remoteParentSampled(Sampler) (default: AlwaysOn) +-// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +-// - localParentSampled(Sampler) (default: AlwaysOn) +-// - localParentNotSampled(Sampler) (default: AlwaysOff) ++// - remoteParentSampled(Sampler) (default: AlwaysOn) ++// - remoteParentNotSampled(Sampler) (default: AlwaysOff) ++// - localParentSampled(Sampler) (default: AlwaysOn) ++// - localParentNotSampled(Sampler) (default: AlwaysOff) + func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +index e8530a95932..f8770fff79b 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +@@ -19,12 +19,13 @@ import ( + "sync" + + "go.opentelemetry.io/otel" ++ "go.opentelemetry.io/otel/internal/global" + ) + + // simpleSpanProcessor is a SpanProcessor that synchronously sends all + // completed Spans to a trace.Exporter immediately. + type simpleSpanProcessor struct { +- exporterMu sync.RWMutex ++ exporterMu sync.Mutex + exporter SpanExporter + stopOnce sync.Once + } +@@ -43,6 +44,8 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, + } ++ global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") ++ + return ssp + } + +@@ -51,8 +54,8 @@ func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + + // OnEnd immediately exports a ReadOnlySpan. 
+ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { +- ssp.exporterMu.RLock() +- defer ssp.exporterMu.RUnlock() ++ ssp.exporterMu.Lock() ++ defer ssp.exporterMu.Unlock() + + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +index 449cf6c2552..36dbf67764b 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +@@ -30,8 +30,9 @@ import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/internal" + "go.opentelemetry.io/otel/sdk/resource" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // ReadOnlySpan allows reading information from the data structure underlying a +@@ -108,6 +109,8 @@ type ReadWriteSpan interface { + // recordingSpan is an implementation of the OpenTelemetry Span API + // representing the individual component of a trace that is sampled. + type recordingSpan struct { ++ embedded.Span ++ + // mu protects the contents of this span. + mu sync.Mutex + +@@ -158,8 +161,10 @@ type recordingSpan struct { + tracer *tracer + } + +-var _ ReadWriteSpan = (*recordingSpan)(nil) +-var _ runtimeTracer = (*recordingSpan)(nil) ++var ( ++ _ ReadWriteSpan = (*recordingSpan)(nil) ++ _ runtimeTracer = (*recordingSpan)(nil) ++) + + // SpanContext returns the SpanContext of this span. + func (s *recordingSpan) SpanContext() trace.SpanContext { +@@ -189,15 +194,18 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { + if !s.IsRecording() { + return + } ++ s.mu.Lock() ++ defer s.mu.Unlock() ++ if s.status.Code > code { ++ return ++ } + + status := Status{Code: code} + if code == codes.Error { + status.Description = description + } + +- s.mu.Lock() + s.status = status +- s.mu.Unlock() + } + + // SetAttributes sets attributes of this span. +@@ -299,7 +307,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { + // most a length of limit. Each string slice value is truncated in this fashion + // (the slice length itself is unaffected). + // +-// No truncation is perfromed for a negative limit. ++// No truncation is performed for a negative limit. + func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr +@@ -310,26 +318,13 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: +- // Do no mutate the original, make a copy. +- trucated := attr.Key.StringSlice(attr.Value.AsStringSlice()) +- // Do not do this. +- // +- // v := trucated.Value.AsStringSlice() +- // cp := make([]string, len(v)) +- // /* Copy and truncate values to cp ... */ +- // trucated.Value = attribute.StringSliceValue(cp) +- // +- // Copying the []string and then assigning it back as a new value with +- // attribute.StringSliceValue will copy the data twice. Instead, we +- // already made a copy above that only this function owns, update the +- // underlying slice data of our copy. 
+- v := trucated.Value.AsStringSlice() ++ v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } +- return trucated ++ return attr.Key.StringSlice(v) + } + return attr + } +@@ -393,14 +388,14 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { + defer panic(recovered) + opts := []trace.EventOption{ + trace.WithAttributes( +- semconv.ExceptionTypeKey.String(typeStr(recovered)), +- semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)), ++ semconv.ExceptionType(typeStr(recovered)), ++ semconv.ExceptionMessage(fmt.Sprint(recovered)), + ), + } + + if config.StackTrace() { + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionStacktraceKey.String(recordStackTrace()), ++ semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + +@@ -420,14 +415,13 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { + } + s.mu.Unlock() + +- if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok { +- if len(sps) == 0 { +- return +- } +- snap := s.snapshot() +- for _, sp := range sps { +- sp.sp.OnEnd(snap) +- } ++ sps := s.tracer.provider.getSpanProcessors() ++ if len(sps) == 0 { ++ return ++ } ++ snap := s.snapshot() ++ for _, sp := range sps { ++ sp.sp.OnEnd(snap) + } + } + +@@ -441,14 +435,14 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { + } + + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionTypeKey.String(typeStr(err)), +- semconv.ExceptionMessageKey.String(err.Error()), ++ semconv.ExceptionType(typeStr(err)), ++ semconv.ExceptionMessage(err.Error()), + )) + + c := trace.NewEventConfig(opts...) + if c.StackTrace() { + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionStacktraceKey.String(recordStackTrace()), ++ semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + +@@ -783,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + // that wraps a SpanContext. It performs no operations other than to return + // the wrapped SpanContext or TracerProvider that created it. + type nonRecordingSpan struct { ++ embedded.Span ++ + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +index 9fb3d6eac3b..c9bd52f7ad4 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +@@ -38,7 +38,7 @@ type SpanExporter interface { + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The +- // exporter is expected to preform any cleanup or synchronization it ++ // exporter is expected to perform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. 
+ Shutdown(ctx context.Context) error +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +index b649a2ff049..9c53657a719 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +@@ -62,6 +62,11 @@ type SpanProcessor interface { + + type spanProcessorState struct { + sp SpanProcessor +- state *sync.Once ++ state sync.Once + } ++ ++func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { ++ return &spanProcessorState{sp: sp} ++} ++ + type spanProcessorStates []*spanProcessorState +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +index 7b11fc465c6..301e1a7abcc 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +@@ -20,9 +20,12 @@ import ( + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + type tracer struct { ++ embedded.Tracer ++ + provider *TracerProvider + instrumentationScope instrumentation.Scope + } +@@ -51,7 +54,7 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS + + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { +- sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) ++ sps := tr.provider.getSpanProcessors() + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +index bfe73de9c41..ae8eae8e8be 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +@@ -162,6 +162,7 @@ func (s spanSnapshot) Resource() *resource.Resource { return s.resource } + func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope + } ++ + func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +new file mode 100644 +index 00000000000..d3457ed1355 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package trace // import "go.opentelemetry.io/otel/sdk/trace" ++ ++// version is the current release version of the metric SDK in use. 
++func version() string { ++ return "1.16.0-rc.1" ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go +new file mode 100644 +index 00000000000..7048c788e93 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/version.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package sdk // import "go.opentelemetry.io/otel/sdk" ++ ++// Version is the current release version of the OpenTelemetry SDK in use. ++func Version() string { ++ return "1.20.0" ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +index b580eedeff7..19c394c69b6 100644 +--- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go ++++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +@@ -232,10 +232,12 @@ func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, r + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } +- if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { +- if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { +- attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) ++ if values := request.Header["X-Forwarded-For"]; len(values) > 0 { ++ addr := values[0] ++ if i := strings.Index(addr, ","); i > 0 { ++ addr = addr[:i] + } ++ attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +new file mode 100644 +index 00000000000..71a1f7748d5 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package semconv implements OpenTelemetry semantic conventions. ++// ++// OpenTelemetry semantic conventions are agreed standardized naming ++// patterns for OpenTelemetry things. This package represents the conventions ++// as of the v1.17.0 version of the OpenTelemetry specification. 
++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +new file mode 100644 +index 00000000000..679c40c4de4 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +@@ -0,0 +1,199 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// This semantic convention defines the attributes used to represent a feature ++// flag evaluation as an event. ++const ( ++ // FeatureFlagKeyKey is the attribute Key conforming to the ++ // "feature_flag.key" semantic conventions. It represents the unique ++ // identifier of the feature flag. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'logo-color' ++ FeatureFlagKeyKey = attribute.Key("feature_flag.key") ++ ++ // FeatureFlagProviderNameKey is the attribute Key conforming to the ++ // "feature_flag.provider_name" semantic conventions. It represents the ++ // name of the service provider that performs the flag evaluation. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'Flag Manager' ++ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") ++ ++ // FeatureFlagVariantKey is the attribute Key conforming to the ++ // "feature_flag.variant" semantic conventions. It represents the sHOULD be ++ // a semantic identifier for a value. If one is unavailable, a stringified ++ // version of the value can be used. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'red', 'true', 'on' ++ // Note: A semantic identifier, commonly referred to as a variant, provides ++ // a means ++ // for referring to a value without including the value itself. This can ++ // provide additional context for understanding the meaning behind a value. ++ // For example, the variant `red` maybe be used for the value `#c05543`. ++ // ++ // A stringified version of the value can be used in situations where a ++ // semantic identifier is unavailable. String representation of the value ++ // should be determined by the implementer. ++ FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ++) ++ ++// FeatureFlagKey returns an attribute KeyValue conforming to the ++// "feature_flag.key" semantic conventions. It represents the unique identifier ++// of the feature flag. ++func FeatureFlagKey(val string) attribute.KeyValue { ++ return FeatureFlagKeyKey.String(val) ++} ++ ++// FeatureFlagProviderName returns an attribute KeyValue conforming to the ++// "feature_flag.provider_name" semantic conventions. It represents the name of ++// the service provider that performs the flag evaluation. 
++func FeatureFlagProviderName(val string) attribute.KeyValue { ++ return FeatureFlagProviderNameKey.String(val) ++} ++ ++// FeatureFlagVariant returns an attribute KeyValue conforming to the ++// "feature_flag.variant" semantic conventions. It represents the sHOULD be a ++// semantic identifier for a value. If one is unavailable, a stringified ++// version of the value can be used. ++func FeatureFlagVariant(val string) attribute.KeyValue { ++ return FeatureFlagVariantKey.String(val) ++} ++ ++// RPC received/sent message. ++const ( ++ // MessageTypeKey is the attribute Key conforming to the "message.type" ++ // semantic conventions. It represents the whether this is a received or ++ // sent message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageTypeKey = attribute.Key("message.type") ++ ++ // MessageIDKey is the attribute Key conforming to the "message.id" ++ // semantic conventions. It represents the mUST be calculated as two ++ // different counters starting from `1` one for sent messages and one for ++ // received message. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This way we guarantee that the values will be consistent between ++ // different implementations. ++ MessageIDKey = attribute.Key("message.id") ++ ++ // MessageCompressedSizeKey is the attribute Key conforming to the ++ // "message.compressed_size" semantic conventions. It represents the ++ // compressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageCompressedSizeKey = attribute.Key("message.compressed_size") ++ ++ // MessageUncompressedSizeKey is the attribute Key conforming to the ++ // "message.uncompressed_size" semantic conventions. It represents the ++ // uncompressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ++) ++ ++var ( ++ // sent ++ MessageTypeSent = MessageTypeKey.String("SENT") ++ // received ++ MessageTypeReceived = MessageTypeKey.String("RECEIVED") ++) ++ ++// MessageID returns an attribute KeyValue conforming to the "message.id" ++// semantic conventions. It represents the mUST be calculated as two different ++// counters starting from `1` one for sent messages and one for received ++// message. ++func MessageID(val int) attribute.KeyValue { ++ return MessageIDKey.Int(val) ++} ++ ++// MessageCompressedSize returns an attribute KeyValue conforming to the ++// "message.compressed_size" semantic conventions. It represents the compressed ++// size of the message in bytes. ++func MessageCompressedSize(val int) attribute.KeyValue { ++ return MessageCompressedSizeKey.Int(val) ++} ++ ++// MessageUncompressedSize returns an attribute KeyValue conforming to the ++// "message.uncompressed_size" semantic conventions. It represents the ++// uncompressed size of the message in bytes. ++func MessageUncompressedSize(val int) attribute.KeyValue { ++ return MessageUncompressedSizeKey.Int(val) ++} ++ ++// The attributes used to report a single exception associated with a span. ++const ( ++ // ExceptionEscapedKey is the attribute Key conforming to the ++ // "exception.escaped" semantic conventions. It represents the sHOULD be ++ // set to true if the exception event is recorded at a point where it is ++ // known that the exception is escaping the scope of the span. 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: An exception is considered to have escaped (or left) the scope of ++ // a span, ++ // if that span is ended while the exception is still logically "in ++ // flight". ++ // This may be actually "in flight" in some languages (e.g. if the ++ // exception ++ // is passed to a Context manager's `__exit__` method in Python) but will ++ // usually be caught at the point of recording the exception in most ++ // languages. ++ // ++ // It is usually not possible to determine at the point where an exception ++ // is thrown ++ // whether it will escape the scope of a span. ++ // However, it is trivial to know that an exception ++ // will escape, if one checks for an active exception just before ending ++ // the span, ++ // as done in the [example above](#recording-an-exception). ++ // ++ // It follows that an exception may still escape the scope of the span ++ // even if the `exception.escaped` attribute was not set or set to false, ++ // since the event might have been recorded at a time where it was not ++ // clear whether the exception will escape. ++ ExceptionEscapedKey = attribute.Key("exception.escaped") ++) ++ ++// ExceptionEscaped returns an attribute KeyValue conforming to the ++// "exception.escaped" semantic conventions. It represents the sHOULD be set to ++// true if the exception event is recorded at a point where it is known that ++// the exception is escaping the scope of the span. ++func ExceptionEscaped(val bool) attribute.KeyValue { ++ return ExceptionEscapedKey.Bool(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +similarity index 70% +rename from vendor/go.opentelemetry.io/otel/metric/unit/unit.go +rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +index 647d77302de..9b8c559de42 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +@@ -12,14 +12,9 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package unit // import "go.opentelemetry.io/otel/metric/unit" ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +-// Unit is a determinate standard quantity of measurement. +-type Unit string +- +-// Units defined by OpenTelemetry. + const ( +- Dimensionless Unit = "1" +- Bytes Unit = "By" +- Milliseconds Unit = "ms" ++ // ExceptionEventName is the name of the Span event representing an exception. ++ ExceptionEventName = "exception" + ) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +new file mode 100644 +index 00000000000..d5c4b5c136a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +@@ -0,0 +1,21 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++// HTTP scheme attributes. ++var ( ++ HTTPSchemeHTTP = HTTPSchemeKey.String("http") ++ HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ++) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +new file mode 100644 +index 00000000000..39a2eab3a6a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +@@ -0,0 +1,2010 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The web browser in which the application represented by the resource is ++// running. The `browser.*` attributes MUST be used only for resources that ++// represent applications running in a web browser (regardless of whether ++// running on a mobile or desktop device). ++const ( ++ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" ++ // semantic conventions. It represents the array of brand name and version ++ // separated by a space ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.brands`). ++ BrowserBrandsKey = attribute.Key("browser.brands") ++ ++ // BrowserPlatformKey is the attribute Key conforming to the ++ // "browser.platform" semantic conventions. It represents the platform on ++ // which the browser is running ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Windows', 'macOS', 'Android' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.platform`). If unavailable, the legacy ++ // `navigator.platform` API SHOULD NOT be used instead and this attribute ++ // SHOULD be left unset in order for the values to be consistent. ++ // The list of possible values is defined in the [W3C User-Agent Client ++ // Hints ++ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). ++ // Note that some (but not all) of these values can overlap with values in ++ // the [`os.type` and `os.name` attributes](./os.md). However, for ++ // consistency, the values in the `browser.platform` attribute should ++ // capture the exact value that the user agent provides. ++ BrowserPlatformKey = attribute.Key("browser.platform") ++ ++ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" ++ // semantic conventions. 
It represents a boolean that is true if the ++ // browser is running on a mobile device ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.mobile`). If unavailable, this attribute ++ // SHOULD be left unset. ++ BrowserMobileKey = attribute.Key("browser.mobile") ++ ++ // BrowserUserAgentKey is the attribute Key conforming to the ++ // "browser.user_agent" semantic conventions. It represents the full ++ // user-agent string provided by the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) ++ // AppleWebKit/537.36 (KHTML, ' ++ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' ++ // Note: The user-agent value SHOULD be provided only from browsers that do ++ // not have a mechanism to retrieve brands and platform individually from ++ // the User-Agent Client Hints API. To retrieve the value, the legacy ++ // `navigator.userAgent` API can be used. ++ BrowserUserAgentKey = attribute.Key("browser.user_agent") ++ ++ // BrowserLanguageKey is the attribute Key conforming to the ++ // "browser.language" semantic conventions. It represents the preferred ++ // language of the user using the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'en', 'en-US', 'fr', 'fr-FR' ++ // Note: This value is intended to be taken from the Navigator API ++ // `navigator.language`. ++ BrowserLanguageKey = attribute.Key("browser.language") ++) ++ ++// BrowserBrands returns an attribute KeyValue conforming to the ++// "browser.brands" semantic conventions. It represents the array of brand name ++// and version separated by a space ++func BrowserBrands(val ...string) attribute.KeyValue { ++ return BrowserBrandsKey.StringSlice(val) ++} ++ ++// BrowserPlatform returns an attribute KeyValue conforming to the ++// "browser.platform" semantic conventions. It represents the platform on which ++// the browser is running ++func BrowserPlatform(val string) attribute.KeyValue { ++ return BrowserPlatformKey.String(val) ++} ++ ++// BrowserMobile returns an attribute KeyValue conforming to the ++// "browser.mobile" semantic conventions. It represents a boolean that is true ++// if the browser is running on a mobile device ++func BrowserMobile(val bool) attribute.KeyValue { ++ return BrowserMobileKey.Bool(val) ++} ++ ++// BrowserUserAgent returns an attribute KeyValue conforming to the ++// "browser.user_agent" semantic conventions. It represents the full user-agent ++// string provided by the browser ++func BrowserUserAgent(val string) attribute.KeyValue { ++ return BrowserUserAgentKey.String(val) ++} ++ ++// BrowserLanguage returns an attribute KeyValue conforming to the ++// "browser.language" semantic conventions. It represents the preferred ++// language of the user using the browser ++func BrowserLanguage(val string) attribute.KeyValue { ++ return BrowserLanguageKey.String(val) ++} ++ ++// A cloud environment (e.g. GCP, Azure, AWS) ++const ( ++ // CloudProviderKey is the attribute Key conforming to the "cloud.provider" ++ // semantic conventions. It represents the name of the cloud provider. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ CloudProviderKey = attribute.Key("cloud.provider") ++ ++ // CloudAccountIDKey is the attribute Key conforming to the ++ // "cloud.account.id" semantic conventions. It represents the cloud account ++ // ID the resource is assigned to. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '111111111111', 'opentelemetry' ++ CloudAccountIDKey = attribute.Key("cloud.account.id") ++ ++ // CloudRegionKey is the attribute Key conforming to the "cloud.region" ++ // semantic conventions. It represents the geographical region the resource ++ // is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-central1', 'us-east-1' ++ // Note: Refer to your provider's docs to see the available regions, for ++ // example [Alibaba Cloud ++ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS ++ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), ++ // [Azure ++ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), ++ // [Google Cloud regions](https://cloud.google.com/about/locations), or ++ // [Tencent Cloud ++ // regions](https://intl.cloud.tencent.com/document/product/213/6091). ++ CloudRegionKey = attribute.Key("cloud.region") ++ ++ // CloudAvailabilityZoneKey is the attribute Key conforming to the ++ // "cloud.availability_zone" semantic conventions. It represents the cloud ++ // regions often have multiple, isolated locations known as zones to ++ // increase availability. Availability zone represents the zone where the ++ // resource is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-east-1c' ++ // Note: Availability zones are called "zones" on Alibaba Cloud and Google ++ // Cloud. ++ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") ++ ++ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" ++ // semantic conventions. It represents the cloud platform in use. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The prefix of the service SHOULD match the one specified in ++ // `cloud.provider`. 
++ CloudPlatformKey = attribute.Key("cloud.platform") ++) ++ ++var ( ++ // Alibaba Cloud ++ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ CloudProviderAWS = CloudProviderKey.String("aws") ++ // Microsoft Azure ++ CloudProviderAzure = CloudProviderKey.String("azure") ++ // Google Cloud Platform ++ CloudProviderGCP = CloudProviderKey.String("gcp") ++ // IBM Cloud ++ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") ++ // Tencent Cloud ++ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ++) ++ ++var ( ++ // Alibaba Cloud Elastic Compute Service ++ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") ++ // Alibaba Cloud Function Compute ++ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") ++ // Red Hat OpenShift on Alibaba Cloud ++ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") ++ // AWS Elastic Compute Cloud ++ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") ++ // AWS Elastic Container Service ++ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") ++ // AWS Elastic Kubernetes Service ++ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") ++ // AWS Lambda ++ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") ++ // AWS Elastic Beanstalk ++ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") ++ // AWS App Runner ++ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") ++ // Red Hat OpenShift on AWS (ROSA) ++ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") ++ // Azure Virtual Machines ++ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") ++ // Azure Container Instances ++ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") ++ // Azure Kubernetes Service ++ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") ++ // Azure Functions ++ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") ++ // Azure App Service ++ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") ++ // Azure Red Hat OpenShift ++ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") ++ // Google Cloud Compute Engine (GCE) ++ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") ++ // Google Cloud Run ++ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") ++ // Google Cloud Kubernetes Engine (GKE) ++ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") ++ // Google Cloud Functions (GCF) ++ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") ++ // Google Cloud App Engine (GAE) ++ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ++ // Red Hat OpenShift on Google Cloud ++ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") ++ // Red Hat OpenShift on IBM Cloud ++ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") ++ // Tencent Cloud Cloud Virtual Machine (CVM) ++ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") ++ // Tencent Cloud Elastic Kubernetes Service (EKS) ++ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") ++ // Tencent Cloud Serverless Cloud Function (SCF) ++ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ++) ++ ++// CloudAccountID returns an attribute KeyValue conforming to the ++// 
"cloud.account.id" semantic conventions. It represents the cloud account ID ++// the resource is assigned to. ++func CloudAccountID(val string) attribute.KeyValue { ++ return CloudAccountIDKey.String(val) ++} ++ ++// CloudRegion returns an attribute KeyValue conforming to the ++// "cloud.region" semantic conventions. It represents the geographical region ++// the resource is running. ++func CloudRegion(val string) attribute.KeyValue { ++ return CloudRegionKey.String(val) ++} ++ ++// CloudAvailabilityZone returns an attribute KeyValue conforming to the ++// "cloud.availability_zone" semantic conventions. It represents the cloud ++// regions often have multiple, isolated locations known as zones to increase ++// availability. Availability zone represents the zone where the resource is ++// running. ++func CloudAvailabilityZone(val string) attribute.KeyValue { ++ return CloudAvailabilityZoneKey.String(val) ++} ++ ++// Resources used by AWS Elastic Container Service (ECS). ++const ( ++ // AWSECSContainerARNKey is the attribute Key conforming to the ++ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++ // Resource Name (ARN) of an [ECS container ++ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' ++ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") ++ ++ // AWSECSClusterARNKey is the attribute Key conforming to the ++ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an ++ // [ECS ++ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") ++ ++ // AWSECSLaunchtypeKey is the attribute Key conforming to the ++ // "aws.ecs.launchtype" semantic conventions. It represents the [launch ++ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) ++ // for an ECS task. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") ++ ++ // AWSECSTaskARNKey is the attribute Key conforming to the ++ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an ++ // [ECS task ++ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") ++ ++ // AWSECSTaskFamilyKey is the attribute Key conforming to the ++ // "aws.ecs.task.family" semantic conventions. It represents the task ++ // definition family this task definition is a member of. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-family' ++ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") ++ ++ // AWSECSTaskRevisionKey is the attribute Key conforming to the ++ // "aws.ecs.task.revision" semantic conventions. It represents the revision ++ // for this task definition. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '8', '26' ++ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ++) ++ ++var ( ++ // ec2 ++ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") ++ // fargate ++ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ++) ++ ++// AWSECSContainerARN returns an attribute KeyValue conforming to the ++// "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++// Resource Name (ARN) of an [ECS container ++// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++func AWSECSContainerARN(val string) attribute.KeyValue { ++ return AWSECSContainerARNKey.String(val) ++} ++ ++// AWSECSClusterARN returns an attribute KeyValue conforming to the ++// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS ++// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++func AWSECSClusterARN(val string) attribute.KeyValue { ++ return AWSECSClusterARNKey.String(val) ++} ++ ++// AWSECSTaskARN returns an attribute KeyValue conforming to the ++// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS ++// task ++// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++func AWSECSTaskARN(val string) attribute.KeyValue { ++ return AWSECSTaskARNKey.String(val) ++} ++ ++// AWSECSTaskFamily returns an attribute KeyValue conforming to the ++// "aws.ecs.task.family" semantic conventions. It represents the task ++// definition family this task definition is a member of. ++func AWSECSTaskFamily(val string) attribute.KeyValue { ++ return AWSECSTaskFamilyKey.String(val) ++} ++ ++// AWSECSTaskRevision returns an attribute KeyValue conforming to the ++// "aws.ecs.task.revision" semantic conventions. It represents the revision for ++// this task definition. ++func AWSECSTaskRevision(val string) attribute.KeyValue { ++ return AWSECSTaskRevisionKey.String(val) ++} ++ ++// Resources used by AWS Elastic Kubernetes Service (EKS). ++const ( ++ // AWSEKSClusterARNKey is the attribute Key conforming to the ++ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an ++ // EKS cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ++) ++ ++// AWSEKSClusterARN returns an attribute KeyValue conforming to the ++// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS ++// cluster. ++func AWSEKSClusterARN(val string) attribute.KeyValue { ++ return AWSEKSClusterARNKey.String(val) ++} ++ ++// Resources specific to Amazon Web Services. ++const ( ++ // AWSLogGroupNamesKey is the attribute Key conforming to the ++ // "aws.log.group.names" semantic conventions. It represents the name(s) of ++ // the AWS log group(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/aws/lambda/my-function', 'opentelemetry-service' ++ // Note: Multiple log groups must be supported for cases like ++ // multi-container applications, where a single application has sidecar ++ // containers, and each write to their own log group. 
++ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") ++ ++ // AWSLogGroupARNsKey is the attribute Key conforming to the ++ // "aws.log.group.arns" semantic conventions. It represents the Amazon ++ // Resource Name(s) (ARN) of the AWS log group(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' ++ // Note: See the [log group ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") ++ ++ // AWSLogStreamNamesKey is the attribute Key conforming to the ++ // "aws.log.stream.names" semantic conventions. It represents the name(s) ++ // of the AWS log stream(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") ++ ++ // AWSLogStreamARNsKey is the attribute Key conforming to the ++ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of ++ // the AWS log stream(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ // Note: See the [log stream ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ // One log group can contain several log streams, so these ARNs necessarily ++ // identify both a log group and a log stream. ++ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ++) ++ ++// AWSLogGroupNames returns an attribute KeyValue conforming to the ++// "aws.log.group.names" semantic conventions. It represents the name(s) of the ++// AWS log group(s) an application is writing to. ++func AWSLogGroupNames(val ...string) attribute.KeyValue { ++ return AWSLogGroupNamesKey.StringSlice(val) ++} ++ ++// AWSLogGroupARNs returns an attribute KeyValue conforming to the ++// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource ++// Name(s) (ARN) of the AWS log group(s). ++func AWSLogGroupARNs(val ...string) attribute.KeyValue { ++ return AWSLogGroupARNsKey.StringSlice(val) ++} ++ ++// AWSLogStreamNames returns an attribute KeyValue conforming to the ++// "aws.log.stream.names" semantic conventions. It represents the name(s) of ++// the AWS log stream(s) an application is writing to. ++func AWSLogStreamNames(val ...string) attribute.KeyValue { ++ return AWSLogStreamNamesKey.StringSlice(val) ++} ++ ++// AWSLogStreamARNs returns an attribute KeyValue conforming to the ++// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the ++// AWS log stream(s). ++func AWSLogStreamARNs(val ...string) attribute.KeyValue { ++ return AWSLogStreamARNsKey.StringSlice(val) ++} ++ ++// A container instance. ++const ( ++ // ContainerNameKey is the attribute Key conforming to the "container.name" ++ // semantic conventions. It represents the container name used by container ++ // runtime. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-autoconf' ++ ContainerNameKey = attribute.Key("container.name") ++ ++ // ContainerIDKey is the attribute Key conforming to the "container.id" ++ // semantic conventions. It represents the container ID. Usually a UUID, as ++ // for example used to [identify Docker ++ // containers](https://docs.docker.com/engine/reference/run/#container-identification). ++ // The UUID might be abbreviated. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'a3bf90e006b2' ++ ContainerIDKey = attribute.Key("container.id") ++ ++ // ContainerRuntimeKey is the attribute Key conforming to the ++ // "container.runtime" semantic conventions. It represents the container ++ // runtime managing this container. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'docker', 'containerd', 'rkt' ++ ContainerRuntimeKey = attribute.Key("container.runtime") ++ ++ // ContainerImageNameKey is the attribute Key conforming to the ++ // "container.image.name" semantic conventions. It represents the name of ++ // the image the container was built on. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'gcr.io/opentelemetry/operator' ++ ContainerImageNameKey = attribute.Key("container.image.name") ++ ++ // ContainerImageTagKey is the attribute Key conforming to the ++ // "container.image.tag" semantic conventions. It represents the container ++ // image tag. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ ContainerImageTagKey = attribute.Key("container.image.tag") ++) ++ ++// ContainerName returns an attribute KeyValue conforming to the ++// "container.name" semantic conventions. It represents the container name used ++// by container runtime. ++func ContainerName(val string) attribute.KeyValue { ++ return ContainerNameKey.String(val) ++} ++ ++// ContainerID returns an attribute KeyValue conforming to the ++// "container.id" semantic conventions. It represents the container ID. Usually ++// a UUID, as for example used to [identify Docker ++// containers](https://docs.docker.com/engine/reference/run/#container-identification). ++// The UUID might be abbreviated. ++func ContainerID(val string) attribute.KeyValue { ++ return ContainerIDKey.String(val) ++} ++ ++// ContainerRuntime returns an attribute KeyValue conforming to the ++// "container.runtime" semantic conventions. It represents the container ++// runtime managing this container. ++func ContainerRuntime(val string) attribute.KeyValue { ++ return ContainerRuntimeKey.String(val) ++} ++ ++// ContainerImageName returns an attribute KeyValue conforming to the ++// "container.image.name" semantic conventions. It represents the name of the ++// image the container was built on. ++func ContainerImageName(val string) attribute.KeyValue { ++ return ContainerImageNameKey.String(val) ++} ++ ++// ContainerImageTag returns an attribute KeyValue conforming to the ++// "container.image.tag" semantic conventions. It represents the container ++// image tag. ++func ContainerImageTag(val string) attribute.KeyValue { ++ return ContainerImageTagKey.String(val) ++} ++ ++// The software deployment. ++const ( ++ // DeploymentEnvironmentKey is the attribute Key conforming to the ++ // "deployment.environment" semantic conventions. 
It represents the name of ++ // the [deployment ++ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++ // deployment tier). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'staging', 'production' ++ DeploymentEnvironmentKey = attribute.Key("deployment.environment") ++) ++ ++// DeploymentEnvironment returns an attribute KeyValue conforming to the ++// "deployment.environment" semantic conventions. It represents the name of the ++// [deployment ++// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++// deployment tier). ++func DeploymentEnvironment(val string) attribute.KeyValue { ++ return DeploymentEnvironmentKey.String(val) ++} ++ ++// The device on which the process represented by this resource is running. ++const ( ++ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic ++ // conventions. It represents a unique identifier representing the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' ++ // Note: The device identifier MUST only be defined using the values ++ // outlined below. This value is not an advertising identifier and MUST NOT ++ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal ++ // to the [vendor ++ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). ++ // On Android (Java or Kotlin), this value MUST be equal to the Firebase ++ // Installation ID or a globally unique UUID which is persisted across ++ // sessions in your application. More information can be found ++ // [here](https://developer.android.com/training/articles/user-data-ids) on ++ // best practices and exact implementation details. Caution should be taken ++ // when storing personal data or anything which can identify a user. GDPR ++ // and data protection laws may apply, ensure you do your own due ++ // diligence. ++ DeviceIDKey = attribute.Key("device.id") ++ ++ // DeviceModelIdentifierKey is the attribute Key conforming to the ++ // "device.model.identifier" semantic conventions. It represents the model ++ // identifier for the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone3,4', 'SM-G920F' ++ // Note: It's recommended this value represents a machine readable version ++ // of the model identifier rather than the market or consumer-friendly name ++ // of the device. ++ DeviceModelIdentifierKey = attribute.Key("device.model.identifier") ++ ++ // DeviceModelNameKey is the attribute Key conforming to the ++ // "device.model.name" semantic conventions. It represents the marketing ++ // name for the device model ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' ++ // Note: It's recommended this value represents a human readable version of ++ // the device model rather than a machine readable alternative. ++ DeviceModelNameKey = attribute.Key("device.model.name") ++ ++ // DeviceManufacturerKey is the attribute Key conforming to the ++ // "device.manufacturer" semantic conventions. 
It represents the name of ++ // the device manufacturer ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Apple', 'Samsung' ++ // Note: The Android OS provides this field via ++ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). ++ // iOS apps SHOULD hardcode the value `Apple`. ++ DeviceManufacturerKey = attribute.Key("device.manufacturer") ++) ++ ++// DeviceID returns an attribute KeyValue conforming to the "device.id" ++// semantic conventions. It represents a unique identifier representing the ++// device ++func DeviceID(val string) attribute.KeyValue { ++ return DeviceIDKey.String(val) ++} ++ ++// DeviceModelIdentifier returns an attribute KeyValue conforming to the ++// "device.model.identifier" semantic conventions. It represents the model ++// identifier for the device ++func DeviceModelIdentifier(val string) attribute.KeyValue { ++ return DeviceModelIdentifierKey.String(val) ++} ++ ++// DeviceModelName returns an attribute KeyValue conforming to the ++// "device.model.name" semantic conventions. It represents the marketing name ++// for the device model ++func DeviceModelName(val string) attribute.KeyValue { ++ return DeviceModelNameKey.String(val) ++} ++ ++// DeviceManufacturer returns an attribute KeyValue conforming to the ++// "device.manufacturer" semantic conventions. It represents the name of the ++// device manufacturer ++func DeviceManufacturer(val string) attribute.KeyValue { ++ return DeviceManufacturerKey.String(val) ++} ++ ++// A serverless instance. ++const ( ++ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic ++ // conventions. It represents the name of the single function that this ++ // runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function', 'myazurefunctionapp/some-function-name' ++ // Note: This is the name of the function as configured/deployed on the ++ // FaaS ++ // platform and is usually different from the name of the callback ++ // function (which may be stored in the ++ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) ++ // span attributes). ++ // ++ // For some cloud providers, the above definition is ambiguous. The ++ // following ++ // definition of function name MUST be used for this attribute ++ // (and consequently the span name) for the listed cloud ++ // providers/products: ++ // ++ // * **Azure:** The full name `/`, i.e., function app name ++ // followed by a forward slash followed by the function name (this form ++ // can also be seen in the resource JSON for the function). ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider (see also the `faas.id` attribute). ++ FaaSNameKey = attribute.Key("faas.name") ++ ++ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic ++ // conventions. It represents the unique ID of the single function that ++ // this runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' ++ // Note: On some cloud providers, it may not be possible to determine the ++ // full ID at startup, ++ // so consider setting `faas.id` as a span attribute instead. 
++ // ++ // The exact value to use for `faas.id` depends on the cloud provider: ++ // ++ // * **AWS Lambda:** The function ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). ++ // Take care not to use the "invoked ARN" directly but replace any ++ // [alias ++ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) ++ // with the resolved function version, as the same runtime instance may ++ // be invokable with ++ // multiple different aliases. ++ // * **GCP:** The [URI of the ++ // resource](https://cloud.google.com/iam/docs/full-resource-names) ++ // * **Azure:** The [Fully Qualified Resource ++ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // of the invoked function, ++ // *not* the function app, having the form ++ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider. ++ FaaSIDKey = attribute.Key("faas.id") ++ ++ // FaaSVersionKey is the attribute Key conforming to the "faas.version" ++ // semantic conventions. It represents the immutable version of the ++ // function being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '26', 'pinkfroid-00002' ++ // Note: Depending on the cloud provider and platform, use: ++ // ++ // * **AWS Lambda:** The [function ++ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) ++ // (an integer represented as a decimal string). ++ // * **Google Cloud Run:** The ++ // [revision](https://cloud.google.com/run/docs/managing/revisions) ++ // (i.e., the function name plus the revision suffix). ++ // * **Google Cloud Functions:** The value of the ++ // [`K_REVISION` environment ++ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). ++ // * **Azure Functions:** Not applicable. Do not set this attribute. ++ FaaSVersionKey = attribute.Key("faas.version") ++ ++ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" ++ // semantic conventions. It represents the execution environment ID as a ++ // string, that will be potentially reused for other invocations to the ++ // same function/function version. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' ++ // Note: * **AWS Lambda:** Use the (full) log stream name. ++ FaaSInstanceKey = attribute.Key("faas.instance") ++ ++ // FaaSMaxMemoryKey is the attribute Key conforming to the ++ // "faas.max_memory" semantic conventions. It represents the amount of ++ // memory available to the serverless function in MiB. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 128 ++ // Note: It's recommended to set this attribute since e.g. too little ++ // memory can easily stop a Java AWS Lambda function from working ++ // correctly. On AWS Lambda, the environment variable ++ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. ++ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ++) ++ ++// FaaSName returns an attribute KeyValue conforming to the "faas.name" ++// semantic conventions. It represents the name of the single function that ++// this runtime instance executes. 
++func FaaSName(val string) attribute.KeyValue { ++ return FaaSNameKey.String(val) ++} ++ ++// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic ++// conventions. It represents the unique ID of the single function that this ++// runtime instance executes. ++func FaaSID(val string) attribute.KeyValue { ++ return FaaSIDKey.String(val) ++} ++ ++// FaaSVersion returns an attribute KeyValue conforming to the ++// "faas.version" semantic conventions. It represents the immutable version of ++// the function being executed. ++func FaaSVersion(val string) attribute.KeyValue { ++ return FaaSVersionKey.String(val) ++} ++ ++// FaaSInstance returns an attribute KeyValue conforming to the ++// "faas.instance" semantic conventions. It represents the execution ++// environment ID as a string, that will be potentially reused for other ++// invocations to the same function/function version. ++func FaaSInstance(val string) attribute.KeyValue { ++ return FaaSInstanceKey.String(val) ++} ++ ++// FaaSMaxMemory returns an attribute KeyValue conforming to the ++// "faas.max_memory" semantic conventions. It represents the amount of memory ++// available to the serverless function in MiB. ++func FaaSMaxMemory(val int) attribute.KeyValue { ++ return FaaSMaxMemoryKey.Int(val) ++} ++ ++// A host is defined as a general computing instance. ++const ( ++ // HostIDKey is the attribute Key conforming to the "host.id" semantic ++ // conventions. It represents the unique host ID. For Cloud, this must be ++ // the instance_id assigned by the cloud provider. For non-containerized ++ // Linux systems, the `machine-id` located in `/etc/machine-id` or ++ // `/var/lib/dbus/machine-id` may be used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'fdbf79e8af94cb7f9e8df36789187052' ++ HostIDKey = attribute.Key("host.id") ++ ++ // HostNameKey is the attribute Key conforming to the "host.name" semantic ++ // conventions. It represents the name of the host. On Unix systems, it may ++ // contain what the hostname command returns, or the fully qualified ++ // hostname, or another name specified by the user. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-test' ++ HostNameKey = attribute.Key("host.name") ++ ++ // HostTypeKey is the attribute Key conforming to the "host.type" semantic ++ // conventions. It represents the type of host. For Cloud, this must be the ++ // machine type. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'n1-standard-1' ++ HostTypeKey = attribute.Key("host.type") ++ ++ // HostArchKey is the attribute Key conforming to the "host.arch" semantic ++ // conventions. It represents the CPU architecture the host system is ++ // running on. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ HostArchKey = attribute.Key("host.arch") ++ ++ // HostImageNameKey is the attribute Key conforming to the ++ // "host.image.name" semantic conventions. It represents the name of the VM ++ // image or OS install the host was instantiated from. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' ++ HostImageNameKey = attribute.Key("host.image.name") ++ ++ // HostImageIDKey is the attribute Key conforming to the "host.image.id" ++ // semantic conventions. It represents the vM image ID. 
For Cloud, this ++ // value is from the provider. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ami-07b06b442921831e5' ++ HostImageIDKey = attribute.Key("host.image.id") ++ ++ // HostImageVersionKey is the attribute Key conforming to the ++ // "host.image.version" semantic conventions. It represents the version ++ // string of the VM image as defined in [Version ++ // Attributes](README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ HostImageVersionKey = attribute.Key("host.image.version") ++) ++ ++var ( ++ // AMD64 ++ HostArchAMD64 = HostArchKey.String("amd64") ++ // ARM32 ++ HostArchARM32 = HostArchKey.String("arm32") ++ // ARM64 ++ HostArchARM64 = HostArchKey.String("arm64") ++ // Itanium ++ HostArchIA64 = HostArchKey.String("ia64") ++ // 32-bit PowerPC ++ HostArchPPC32 = HostArchKey.String("ppc32") ++ // 64-bit PowerPC ++ HostArchPPC64 = HostArchKey.String("ppc64") ++ // IBM z/Architecture ++ HostArchS390x = HostArchKey.String("s390x") ++ // 32-bit x86 ++ HostArchX86 = HostArchKey.String("x86") ++) ++ ++// HostID returns an attribute KeyValue conforming to the "host.id" semantic ++// conventions. It represents the unique host ID. For Cloud, this must be the ++// instance_id assigned by the cloud provider. For non-containerized Linux ++// systems, the `machine-id` located in `/etc/machine-id` or ++// `/var/lib/dbus/machine-id` may be used. ++func HostID(val string) attribute.KeyValue { ++ return HostIDKey.String(val) ++} ++ ++// HostName returns an attribute KeyValue conforming to the "host.name" ++// semantic conventions. It represents the name of the host. On Unix systems, ++// it may contain what the hostname command returns, or the fully qualified ++// hostname, or another name specified by the user. ++func HostName(val string) attribute.KeyValue { ++ return HostNameKey.String(val) ++} ++ ++// HostType returns an attribute KeyValue conforming to the "host.type" ++// semantic conventions. It represents the type of host. For Cloud, this must ++// be the machine type. ++func HostType(val string) attribute.KeyValue { ++ return HostTypeKey.String(val) ++} ++ ++// HostImageName returns an attribute KeyValue conforming to the ++// "host.image.name" semantic conventions. It represents the name of the VM ++// image or OS install the host was instantiated from. ++func HostImageName(val string) attribute.KeyValue { ++ return HostImageNameKey.String(val) ++} ++ ++// HostImageID returns an attribute KeyValue conforming to the ++// "host.image.id" semantic conventions. It represents the vM image ID. For ++// Cloud, this value is from the provider. ++func HostImageID(val string) attribute.KeyValue { ++ return HostImageIDKey.String(val) ++} ++ ++// HostImageVersion returns an attribute KeyValue conforming to the ++// "host.image.version" semantic conventions. It represents the version string ++// of the VM image as defined in [Version ++// Attributes](README.md#version-attributes). ++func HostImageVersion(val string) attribute.KeyValue { ++ return HostImageVersionKey.String(val) ++} ++ ++// A Kubernetes Cluster. ++const ( ++ // K8SClusterNameKey is the attribute Key conforming to the ++ // "k8s.cluster.name" semantic conventions. It represents the name of the ++ // cluster. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-cluster' ++ K8SClusterNameKey = attribute.Key("k8s.cluster.name") ++) ++ ++// K8SClusterName returns an attribute KeyValue conforming to the ++// "k8s.cluster.name" semantic conventions. It represents the name of the ++// cluster. ++func K8SClusterName(val string) attribute.KeyValue { ++ return K8SClusterNameKey.String(val) ++} ++ ++// A Kubernetes Node object. ++const ( ++ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" ++ // semantic conventions. It represents the name of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'node-1' ++ K8SNodeNameKey = attribute.Key("k8s.node.name") ++ ++ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" ++ // semantic conventions. It represents the UID of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' ++ K8SNodeUIDKey = attribute.Key("k8s.node.uid") ++) ++ ++// K8SNodeName returns an attribute KeyValue conforming to the ++// "k8s.node.name" semantic conventions. It represents the name of the Node. ++func K8SNodeName(val string) attribute.KeyValue { ++ return K8SNodeNameKey.String(val) ++} ++ ++// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" ++// semantic conventions. It represents the UID of the Node. ++func K8SNodeUID(val string) attribute.KeyValue { ++ return K8SNodeUIDKey.String(val) ++} ++ ++// A Kubernetes Namespace. ++const ( ++ // K8SNamespaceNameKey is the attribute Key conforming to the ++ // "k8s.namespace.name" semantic conventions. It represents the name of the ++ // namespace that the pod is running in. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'default' ++ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ++) ++ ++// K8SNamespaceName returns an attribute KeyValue conforming to the ++// "k8s.namespace.name" semantic conventions. It represents the name of the ++// namespace that the pod is running in. ++func K8SNamespaceName(val string) attribute.KeyValue { ++ return K8SNamespaceNameKey.String(val) ++} ++ ++// A Kubernetes Pod object. ++const ( ++ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" ++ // semantic conventions. It represents the UID of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SPodUIDKey = attribute.Key("k8s.pod.uid") ++ ++ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" ++ // semantic conventions. It represents the name of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-pod-autoconf' ++ K8SPodNameKey = attribute.Key("k8s.pod.name") ++) ++ ++// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" ++// semantic conventions. It represents the UID of the Pod. ++func K8SPodUID(val string) attribute.KeyValue { ++ return K8SPodUIDKey.String(val) ++} ++ ++// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" ++// semantic conventions. It represents the name of the Pod. ++func K8SPodName(val string) attribute.KeyValue { ++ return K8SPodNameKey.String(val) ++} ++ ++// A container in a ++// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). 
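The Kubernetes resource helpers above are commonly fed from the downward API. A short sketch under that assumption; the POD_NAMESPACE, POD_NAME, and NODE_NAME environment variable names and the SDK resource import are conventions assumed for illustration, not something this file mandates:

package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// Identify the workload from values injected by the downward API;
	// empty attributes simply indicate the env vars were not set.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("POD_NAME")),
		semconv.K8SNodeName(os.Getenv("NODE_NAME")),
	)
	fmt.Println(res.Attributes())
}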
++const ( ++ // K8SContainerNameKey is the attribute Key conforming to the ++ // "k8s.container.name" semantic conventions. It represents the name of the ++ // Container from Pod specification, must be unique within a Pod. Container ++ // runtime usually uses different globally unique name (`container.name`). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'redis' ++ K8SContainerNameKey = attribute.Key("k8s.container.name") ++ ++ // K8SContainerRestartCountKey is the attribute Key conforming to the ++ // "k8s.container.restart_count" semantic conventions. It represents the ++ // number of times the container was restarted. This attribute can be used ++ // to identify a particular container (running or stopped) within a ++ // container spec. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ++) ++ ++// K8SContainerName returns an attribute KeyValue conforming to the ++// "k8s.container.name" semantic conventions. It represents the name of the ++// Container from Pod specification, must be unique within a Pod. Container ++// runtime usually uses different globally unique name (`container.name`). ++func K8SContainerName(val string) attribute.KeyValue { ++ return K8SContainerNameKey.String(val) ++} ++ ++// K8SContainerRestartCount returns an attribute KeyValue conforming to the ++// "k8s.container.restart_count" semantic conventions. It represents the number ++// of times the container was restarted. This attribute can be used to identify ++// a particular container (running or stopped) within a container spec. ++func K8SContainerRestartCount(val int) attribute.KeyValue { ++ return K8SContainerRestartCountKey.Int(val) ++} ++ ++// A Kubernetes ReplicaSet object. ++const ( ++ // K8SReplicaSetUIDKey is the attribute Key conforming to the ++ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++ // ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") ++ ++ // K8SReplicaSetNameKey is the attribute Key conforming to the ++ // "k8s.replicaset.name" semantic conventions. It represents the name of ++ // the ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ++) ++ ++// K8SReplicaSetUID returns an attribute KeyValue conforming to the ++// "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++// ReplicaSet. ++func K8SReplicaSetUID(val string) attribute.KeyValue { ++ return K8SReplicaSetUIDKey.String(val) ++} ++ ++// K8SReplicaSetName returns an attribute KeyValue conforming to the ++// "k8s.replicaset.name" semantic conventions. It represents the name of the ++// ReplicaSet. ++func K8SReplicaSetName(val string) attribute.KeyValue { ++ return K8SReplicaSetNameKey.String(val) ++} ++ ++// A Kubernetes Deployment object. ++const ( ++ // K8SDeploymentUIDKey is the attribute Key conforming to the ++ // "k8s.deployment.uid" semantic conventions. It represents the UID of the ++ // Deployment. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") ++ ++ // K8SDeploymentNameKey is the attribute Key conforming to the ++ // "k8s.deployment.name" semantic conventions. It represents the name of ++ // the Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ++) ++ ++// K8SDeploymentUID returns an attribute KeyValue conforming to the ++// "k8s.deployment.uid" semantic conventions. It represents the UID of the ++// Deployment. ++func K8SDeploymentUID(val string) attribute.KeyValue { ++ return K8SDeploymentUIDKey.String(val) ++} ++ ++// K8SDeploymentName returns an attribute KeyValue conforming to the ++// "k8s.deployment.name" semantic conventions. It represents the name of the ++// Deployment. ++func K8SDeploymentName(val string) attribute.KeyValue { ++ return K8SDeploymentNameKey.String(val) ++} ++ ++// A Kubernetes StatefulSet object. ++const ( ++ // K8SStatefulSetUIDKey is the attribute Key conforming to the ++ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++ // StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") ++ ++ // K8SStatefulSetNameKey is the attribute Key conforming to the ++ // "k8s.statefulset.name" semantic conventions. It represents the name of ++ // the StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ++) ++ ++// K8SStatefulSetUID returns an attribute KeyValue conforming to the ++// "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++// StatefulSet. ++func K8SStatefulSetUID(val string) attribute.KeyValue { ++ return K8SStatefulSetUIDKey.String(val) ++} ++ ++// K8SStatefulSetName returns an attribute KeyValue conforming to the ++// "k8s.statefulset.name" semantic conventions. It represents the name of the ++// StatefulSet. ++func K8SStatefulSetName(val string) attribute.KeyValue { ++ return K8SStatefulSetNameKey.String(val) ++} ++ ++// A Kubernetes DaemonSet object. ++const ( ++ // K8SDaemonSetUIDKey is the attribute Key conforming to the ++ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") ++ ++ // K8SDaemonSetNameKey is the attribute Key conforming to the ++ // "k8s.daemonset.name" semantic conventions. It represents the name of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ++) ++ ++// K8SDaemonSetUID returns an attribute KeyValue conforming to the ++// "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++// DaemonSet. ++func K8SDaemonSetUID(val string) attribute.KeyValue { ++ return K8SDaemonSetUIDKey.String(val) ++} ++ ++// K8SDaemonSetName returns an attribute KeyValue conforming to the ++// "k8s.daemonset.name" semantic conventions. 
It represents the name of the ++// DaemonSet. ++func K8SDaemonSetName(val string) attribute.KeyValue { ++ return K8SDaemonSetNameKey.String(val) ++} ++ ++// A Kubernetes Job object. ++const ( ++ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" ++ // semantic conventions. It represents the UID of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SJobUIDKey = attribute.Key("k8s.job.uid") ++ ++ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" ++ // semantic conventions. It represents the name of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SJobNameKey = attribute.Key("k8s.job.name") ++) ++ ++// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" ++// semantic conventions. It represents the UID of the Job. ++func K8SJobUID(val string) attribute.KeyValue { ++ return K8SJobUIDKey.String(val) ++} ++ ++// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" ++// semantic conventions. It represents the name of the Job. ++func K8SJobName(val string) attribute.KeyValue { ++ return K8SJobNameKey.String(val) ++} ++ ++// A Kubernetes CronJob object. ++const ( ++ // K8SCronJobUIDKey is the attribute Key conforming to the ++ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") ++ ++ // K8SCronJobNameKey is the attribute Key conforming to the ++ // "k8s.cronjob.name" semantic conventions. It represents the name of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ++) ++ ++// K8SCronJobUID returns an attribute KeyValue conforming to the ++// "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++// CronJob. ++func K8SCronJobUID(val string) attribute.KeyValue { ++ return K8SCronJobUIDKey.String(val) ++} ++ ++// K8SCronJobName returns an attribute KeyValue conforming to the ++// "k8s.cronjob.name" semantic conventions. It represents the name of the ++// CronJob. ++func K8SCronJobName(val string) attribute.KeyValue { ++ return K8SCronJobNameKey.String(val) ++} ++ ++// The operating system (OS) on which the process represented by this resource ++// is running. ++const ( ++ // OSTypeKey is the attribute Key conforming to the "os.type" semantic ++ // conventions. It represents the operating system type. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ OSTypeKey = attribute.Key("os.type") ++ ++ // OSDescriptionKey is the attribute Key conforming to the "os.description" ++ // semantic conventions. It represents the human readable (not intended to ++ // be parsed) OS version information, like e.g. reported by `ver` or ++ // `lsb_release -a` commands. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 ++ // LTS' ++ OSDescriptionKey = attribute.Key("os.description") ++ ++ // OSNameKey is the attribute Key conforming to the "os.name" semantic ++ // conventions. It represents the human readable operating system name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iOS', 'Android', 'Ubuntu' ++ OSNameKey = attribute.Key("os.name") ++ ++ // OSVersionKey is the attribute Key conforming to the "os.version" ++ // semantic conventions. It represents the version string of the operating ++ // system as defined in [Version ++ // Attributes](../../resource/semantic_conventions/README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.2.1', '18.04.1' ++ OSVersionKey = attribute.Key("os.version") ++) ++ ++var ( ++ // Microsoft Windows ++ OSTypeWindows = OSTypeKey.String("windows") ++ // Linux ++ OSTypeLinux = OSTypeKey.String("linux") ++ // Apple Darwin ++ OSTypeDarwin = OSTypeKey.String("darwin") ++ // FreeBSD ++ OSTypeFreeBSD = OSTypeKey.String("freebsd") ++ // NetBSD ++ OSTypeNetBSD = OSTypeKey.String("netbsd") ++ // OpenBSD ++ OSTypeOpenBSD = OSTypeKey.String("openbsd") ++ // DragonFly BSD ++ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") ++ // HP-UX (Hewlett Packard Unix) ++ OSTypeHPUX = OSTypeKey.String("hpux") ++ // AIX (Advanced Interactive eXecutive) ++ OSTypeAIX = OSTypeKey.String("aix") ++ // SunOS, Oracle Solaris ++ OSTypeSolaris = OSTypeKey.String("solaris") ++ // IBM z/OS ++ OSTypeZOS = OSTypeKey.String("z_os") ++) ++ ++// OSDescription returns an attribute KeyValue conforming to the ++// "os.description" semantic conventions. It represents the human readable (not ++// intended to be parsed) OS version information, like e.g. reported by `ver` ++// or `lsb_release -a` commands. ++func OSDescription(val string) attribute.KeyValue { ++ return OSDescriptionKey.String(val) ++} ++ ++// OSName returns an attribute KeyValue conforming to the "os.name" semantic ++// conventions. It represents the human readable operating system name. ++func OSName(val string) attribute.KeyValue { ++ return OSNameKey.String(val) ++} ++ ++// OSVersion returns an attribute KeyValue conforming to the "os.version" ++// semantic conventions. It represents the version string of the operating ++// system as defined in [Version ++// Attributes](../../resource/semantic_conventions/README.md#version-attributes). ++func OSVersion(val string) attribute.KeyValue { ++ return OSVersionKey.String(val) ++} ++ ++// An operating system process. ++const ( ++ // ProcessPIDKey is the attribute Key conforming to the "process.pid" ++ // semantic conventions. It represents the process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1234 ++ ProcessPIDKey = attribute.Key("process.pid") ++ ++ // ProcessParentPIDKey is the attribute Key conforming to the ++ // "process.parent_pid" semantic conventions. It represents the parent ++ // Process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 111 ++ ProcessParentPIDKey = attribute.Key("process.parent_pid") ++ ++ // ProcessExecutableNameKey is the attribute Key conforming to the ++ // "process.executable.name" semantic conventions. It represents the name ++ // of the process executable. On Linux based systems, can be set to the ++ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name ++ // of `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) 
++ // Stability: stable ++ // Examples: 'otelcol' ++ ProcessExecutableNameKey = attribute.Key("process.executable.name") ++ ++ // ProcessExecutablePathKey is the attribute Key conforming to the ++ // "process.executable.path" semantic conventions. It represents the full ++ // path to the process executable. On Linux based systems, can be set to ++ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of ++ // `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: '/usr/bin/cmd/otelcol' ++ ProcessExecutablePathKey = attribute.Key("process.executable.path") ++ ++ // ProcessCommandKey is the attribute Key conforming to the ++ // "process.command" semantic conventions. It represents the command used ++ // to launch the process (i.e. the command name). On Linux based systems, ++ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can ++ // be set to the first parameter extracted from `GetCommandLineW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otelcol' ++ ProcessCommandKey = attribute.Key("process.command") ++ ++ // ProcessCommandLineKey is the attribute Key conforming to the ++ // "process.command_line" semantic conventions. It represents the full ++ // command used to launch the process as a single string representing the ++ // full command. On Windows, can be set to the result of `GetCommandLineW`. ++ // Do not set this if you have to assemble it just for monitoring; use ++ // `process.command_args` instead. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ++ ProcessCommandLineKey = attribute.Key("process.command_line") ++ ++ // ProcessCommandArgsKey is the attribute Key conforming to the ++ // "process.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) as received ++ // by the process. On Linux-based systems (and some other Unixoid systems ++ // supporting procfs), can be set according to the list of null-delimited ++ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++ // this would be the full argv vector passed to `main`. ++ // ++ // Type: string[] ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otecol', '--config=config.yaml' ++ ProcessCommandArgsKey = attribute.Key("process.command_args") ++ ++ // ProcessOwnerKey is the attribute Key conforming to the "process.owner" ++ // semantic conventions. It represents the username of the user that owns ++ // the process. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'root' ++ ProcessOwnerKey = attribute.Key("process.owner") ++) ++ ++// ProcessPID returns an attribute KeyValue conforming to the "process.pid" ++// semantic conventions. It represents the process identifier (PID). ++func ProcessPID(val int) attribute.KeyValue { ++ return ProcessPIDKey.Int(val) ++} ++ ++// ProcessParentPID returns an attribute KeyValue conforming to the ++// "process.parent_pid" semantic conventions. It represents the parent Process ++// identifier (PID). 
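A brief sketch of populating the process attributes from the running process itself, again assuming the SDK resource package from outside this hunk; error handling is kept minimal and nothing here is prescribed by the patch:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	exe, err := os.Executable()
	if err != nil {
		exe = "unknown" // fall back to a placeholder if the path is unavailable
	}
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessExecutableName(filepath.Base(exe)),
		semconv.ProcessExecutablePath(exe),
	)
	fmt.Println(res.Attributes())
}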
++func ProcessParentPID(val int) attribute.KeyValue { ++ return ProcessParentPIDKey.Int(val) ++} ++ ++// ProcessExecutableName returns an attribute KeyValue conforming to the ++// "process.executable.name" semantic conventions. It represents the name of ++// the process executable. On Linux based systems, can be set to the `Name` in ++// `proc/[pid]/status`. On Windows, can be set to the base name of ++// `GetProcessImageFileNameW`. ++func ProcessExecutableName(val string) attribute.KeyValue { ++ return ProcessExecutableNameKey.String(val) ++} ++ ++// ProcessExecutablePath returns an attribute KeyValue conforming to the ++// "process.executable.path" semantic conventions. It represents the full path ++// to the process executable. On Linux based systems, can be set to the target ++// of `proc/[pid]/exe`. On Windows, can be set to the result of ++// `GetProcessImageFileNameW`. ++func ProcessExecutablePath(val string) attribute.KeyValue { ++ return ProcessExecutablePathKey.String(val) ++} ++ ++// ProcessCommand returns an attribute KeyValue conforming to the ++// "process.command" semantic conventions. It represents the command used to ++// launch the process (i.e. the command name). On Linux based systems, can be ++// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to ++// the first parameter extracted from `GetCommandLineW`. ++func ProcessCommand(val string) attribute.KeyValue { ++ return ProcessCommandKey.String(val) ++} ++ ++// ProcessCommandLine returns an attribute KeyValue conforming to the ++// "process.command_line" semantic conventions. It represents the full command ++// used to launch the process as a single string representing the full command. ++// On Windows, can be set to the result of `GetCommandLineW`. Do not set this ++// if you have to assemble it just for monitoring; use `process.command_args` ++// instead. ++func ProcessCommandLine(val string) attribute.KeyValue { ++ return ProcessCommandLineKey.String(val) ++} ++ ++// ProcessCommandArgs returns an attribute KeyValue conforming to the ++// "process.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) as received by ++// the process. On Linux-based systems (and some other Unixoid systems ++// supporting procfs), can be set according to the list of null-delimited ++// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++// this would be the full argv vector passed to `main`. ++func ProcessCommandArgs(val ...string) attribute.KeyValue { ++ return ProcessCommandArgsKey.StringSlice(val) ++} ++ ++// ProcessOwner returns an attribute KeyValue conforming to the ++// "process.owner" semantic conventions. It represents the username of the user ++// that owns the process. ++func ProcessOwner(val string) attribute.KeyValue { ++ return ProcessOwnerKey.String(val) ++} ++ ++// The single (language) runtime instance which is monitored. ++const ( ++ // ProcessRuntimeNameKey is the attribute Key conforming to the ++ // "process.runtime.name" semantic conventions. It represents the name of ++ // the runtime of this process. For compiled native binaries, this SHOULD ++ // be the name of the compiler. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'OpenJDK Runtime Environment' ++ ProcessRuntimeNameKey = attribute.Key("process.runtime.name") ++ ++ // ProcessRuntimeVersionKey is the attribute Key conforming to the ++ // "process.runtime.version" semantic conventions. 
It represents the ++ // version of the runtime of this process, as returned by the runtime ++ // without modification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.0.2' ++ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") ++ ++ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the ++ // "process.runtime.description" semantic conventions. It represents an ++ // additional description about the runtime of the process, for example a ++ // specific vendor customization of the runtime environment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ++ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ++) ++ ++// ProcessRuntimeName returns an attribute KeyValue conforming to the ++// "process.runtime.name" semantic conventions. It represents the name of the ++// runtime of this process. For compiled native binaries, this SHOULD be the ++// name of the compiler. ++func ProcessRuntimeName(val string) attribute.KeyValue { ++ return ProcessRuntimeNameKey.String(val) ++} ++ ++// ProcessRuntimeVersion returns an attribute KeyValue conforming to the ++// "process.runtime.version" semantic conventions. It represents the version of ++// the runtime of this process, as returned by the runtime without ++// modification. ++func ProcessRuntimeVersion(val string) attribute.KeyValue { ++ return ProcessRuntimeVersionKey.String(val) ++} ++ ++// ProcessRuntimeDescription returns an attribute KeyValue conforming to the ++// "process.runtime.description" semantic conventions. It represents an ++// additional description about the runtime of the process, for example a ++// specific vendor customization of the runtime environment. ++func ProcessRuntimeDescription(val string) attribute.KeyValue { ++ return ProcessRuntimeDescriptionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNameKey is the attribute Key conforming to the "service.name" ++ // semantic conventions. It represents the logical name of the service. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'shoppingcart' ++ // Note: MUST be the same for all instances of horizontally scaled ++ // services. If the value was not specified, SDKs MUST fallback to ++ // `unknown_service:` concatenated with ++ // [`process.executable.name`](process.md#process), e.g. ++ // `unknown_service:bash`. If `process.executable.name` is not available, ++ // the value MUST be set to `unknown_service`. ++ ServiceNameKey = attribute.Key("service.name") ++ ++ // ServiceNamespaceKey is the attribute Key conforming to the ++ // "service.namespace" semantic conventions. It represents a namespace for ++ // `service.name`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Shop' ++ // Note: A string value having a meaning that helps to distinguish a group ++ // of services, for example the team name that owns a group of services. ++ // `service.name` is expected to be unique within the same namespace. If ++ // `service.namespace` is not specified in the Resource then `service.name` ++ // is expected to be unique for all services that have no explicit ++ // namespace defined (so the empty/unspecified namespace is simply one more ++ // valid namespace). Zero-length namespace string is assumed equal to ++ // unspecified namespace. 
++ ServiceNamespaceKey = attribute.Key("service.namespace") ++ ++ // ServiceInstanceIDKey is the attribute Key conforming to the ++ // "service.instance.id" semantic conventions. It represents the string ID ++ // of the service instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '627cc493-f310-47de-96bd-71410b7dec09' ++ // Note: MUST be unique for each instance of the same ++ // `service.namespace,service.name` pair (in other words ++ // `service.namespace,service.name,service.instance.id` triplet MUST be ++ // globally unique). The ID helps to distinguish instances of the same ++ // service that exist at the same time (e.g. instances of a horizontally ++ // scaled service). It is preferable for the ID to be persistent and stay ++ // the same for the lifetime of the service instance, however it is ++ // acceptable that the ID is ephemeral and changes during important ++ // lifetime events for the service (e.g. service restarts). If the service ++ // has no inherent unique ID that can be used as the value of this ++ // attribute it is recommended to generate a random Version 1 or Version 4 ++ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use ++ // Version 5, see RFC 4122 for more recommendations). ++ ServiceInstanceIDKey = attribute.Key("service.instance.id") ++ ++ // ServiceVersionKey is the attribute Key conforming to the ++ // "service.version" semantic conventions. It represents the version string ++ // of the service API or implementation. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2.0.0' ++ ServiceVersionKey = attribute.Key("service.version") ++) ++ ++// ServiceName returns an attribute KeyValue conforming to the ++// "service.name" semantic conventions. It represents the logical name of the ++// service. ++func ServiceName(val string) attribute.KeyValue { ++ return ServiceNameKey.String(val) ++} ++ ++// ServiceNamespace returns an attribute KeyValue conforming to the ++// "service.namespace" semantic conventions. It represents a namespace for ++// `service.name`. ++func ServiceNamespace(val string) attribute.KeyValue { ++ return ServiceNamespaceKey.String(val) ++} ++ ++// ServiceInstanceID returns an attribute KeyValue conforming to the ++// "service.instance.id" semantic conventions. It represents the string ID of ++// the service instance. ++func ServiceInstanceID(val string) attribute.KeyValue { ++ return ServiceInstanceIDKey.String(val) ++} ++ ++// ServiceVersion returns an attribute KeyValue conforming to the ++// "service.version" semantic conventions. It represents the version string of ++// the service API or implementation. ++func ServiceVersion(val string) attribute.KeyValue { ++ return ServiceVersionKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetrySDKNameKey is the attribute Key conforming to the ++ // "telemetry.sdk.name" semantic conventions. It represents the name of the ++ // telemetry SDK as defined above. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") ++ ++ // TelemetrySDKLanguageKey is the attribute Key conforming to the ++ // "telemetry.sdk.language" semantic conventions. It represents the ++ // language of the telemetry SDK. 
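The service attributes are the ones most callers set explicitly. A minimal sketch of wiring them into a tracer provider, assuming `go.opentelemetry.io/otel/sdk/resource` and `go.opentelemetry.io/otel/sdk/trace` (neither is part of this hunk) and placeholder values throughout:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// service.name is the only Required attribute in this group; the
	// others refine identity. All values below are placeholders.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("checkout"),
		semconv.ServiceNamespace("shop"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
		semconv.ServiceVersion("2.0.0"),
	)
	tp := sdktrace.NewTracerProvider(sdktrace.WithResource(res))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)
}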
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") ++ ++ // TelemetrySDKVersionKey is the attribute Key conforming to the ++ // "telemetry.sdk.version" semantic conventions. It represents the version ++ // string of the telemetry SDK. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ++ ++ // TelemetryAutoVersionKey is the attribute Key conforming to the ++ // "telemetry.auto.version" semantic conventions. It represents the version ++ // string of the auto instrumentation agent, if used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ++) ++ ++var ( ++ // cpp ++ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") ++ // dotnet ++ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") ++ // erlang ++ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") ++ // go ++ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") ++ // java ++ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") ++ // nodejs ++ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") ++ // php ++ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") ++ // python ++ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") ++ // ruby ++ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") ++ // webjs ++ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ++ // swift ++ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ++) ++ ++// TelemetrySDKName returns an attribute KeyValue conforming to the ++// "telemetry.sdk.name" semantic conventions. It represents the name of the ++// telemetry SDK as defined above. ++func TelemetrySDKName(val string) attribute.KeyValue { ++ return TelemetrySDKNameKey.String(val) ++} ++ ++// TelemetrySDKVersion returns an attribute KeyValue conforming to the ++// "telemetry.sdk.version" semantic conventions. It represents the version ++// string of the telemetry SDK. ++func TelemetrySDKVersion(val string) attribute.KeyValue { ++ return TelemetrySDKVersionKey.String(val) ++} ++ ++// TelemetryAutoVersion returns an attribute KeyValue conforming to the ++// "telemetry.auto.version" semantic conventions. It represents the version ++// string of the auto instrumentation agent, if used. ++func TelemetryAutoVersion(val string) attribute.KeyValue { ++ return TelemetryAutoVersionKey.String(val) ++} ++ ++// Resource describing the packaged software running the application code. Web ++// engines are typically executed using process.runtime. ++const ( ++ // WebEngineNameKey is the attribute Key conforming to the "webengine.name" ++ // semantic conventions. It represents the name of the web engine. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'WildFly' ++ WebEngineNameKey = attribute.Key("webengine.name") ++ ++ // WebEngineVersionKey is the attribute Key conforming to the ++ // "webengine.version" semantic conventions. It represents the version of ++ // the web engine. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '21.0.0' ++ WebEngineVersionKey = attribute.Key("webengine.version") ++ ++ // WebEngineDescriptionKey is the attribute Key conforming to the ++ // "webengine.description" semantic conventions. It represents the ++ // additional description of the web engine (e.g. detailed version and ++ // edition information). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - ++ // 2.2.2.Final' ++ WebEngineDescriptionKey = attribute.Key("webengine.description") ++) ++ ++// WebEngineName returns an attribute KeyValue conforming to the ++// "webengine.name" semantic conventions. It represents the name of the web ++// engine. ++func WebEngineName(val string) attribute.KeyValue { ++ return WebEngineNameKey.String(val) ++} ++ ++// WebEngineVersion returns an attribute KeyValue conforming to the ++// "webengine.version" semantic conventions. It represents the version of the ++// web engine. ++func WebEngineVersion(val string) attribute.KeyValue { ++ return WebEngineVersionKey.String(val) ++} ++ ++// WebEngineDescription returns an attribute KeyValue conforming to the ++// "webengine.description" semantic conventions. It represents the additional ++// description of the web engine (e.g. detailed version and edition ++// information). ++func WebEngineDescription(val string) attribute.KeyValue { ++ return WebEngineDescriptionKey.String(val) ++} ++ ++// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's ++// concepts. ++const ( ++ // OtelScopeNameKey is the attribute Key conforming to the ++ // "otel.scope.name" semantic conventions. It represents the name of the ++ // instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OtelScopeNameKey = attribute.Key("otel.scope.name") ++ ++ // OtelScopeVersionKey is the attribute Key conforming to the ++ // "otel.scope.version" semantic conventions. It represents the version of ++ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0.0' ++ OtelScopeVersionKey = attribute.Key("otel.scope.version") ++) ++ ++// OtelScopeName returns an attribute KeyValue conforming to the ++// "otel.scope.name" semantic conventions. It represents the name of the ++// instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++func OtelScopeName(val string) attribute.KeyValue { ++ return OtelScopeNameKey.String(val) ++} ++ ++// OtelScopeVersion returns an attribute KeyValue conforming to the ++// "otel.scope.version" semantic conventions. It represents the version of the ++// instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++func OtelScopeVersion(val string) attribute.KeyValue { ++ return OtelScopeVersionKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry ++// Scope's concepts. ++const ( ++ // OtelLibraryNameKey is the attribute Key conforming to the ++ // "otel.library.name" semantic conventions. It represents the deprecated, ++ // use the `otel.scope.name` attribute. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OtelLibraryNameKey = attribute.Key("otel.library.name") ++ ++ // OtelLibraryVersionKey is the attribute Key conforming to the ++ // "otel.library.version" semantic conventions. It represents the ++ // deprecated, use the `otel.scope.version` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '1.0.0' ++ OtelLibraryVersionKey = attribute.Key("otel.library.version") ++) ++ ++// OtelLibraryName returns an attribute KeyValue conforming to the ++// "otel.library.name" semantic conventions. It represents the deprecated, use ++// the `otel.scope.name` attribute. ++func OtelLibraryName(val string) attribute.KeyValue { ++ return OtelLibraryNameKey.String(val) ++} ++ ++// OtelLibraryVersion returns an attribute KeyValue conforming to the ++// "otel.library.version" semantic conventions. It represents the deprecated, ++// use the `otel.scope.version` attribute. ++func OtelLibraryVersion(val string) attribute.KeyValue { ++ return OtelLibraryVersionKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +new file mode 100644 +index 00000000000..42fc525d165 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++// SchemaURL is the schema URL that matches the version of the semantic conventions ++// that this package defines. Semconv packages starting from v1.4.0 must declare ++// non-empty schema URL in the form https://opentelemetry.io/schemas/ ++const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +new file mode 100644 +index 00000000000..8c4a7299d27 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +@@ -0,0 +1,3375 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. 
++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The shared attributes used to report a single exception associated with a ++// span or log. ++const ( ++ // ExceptionTypeKey is the attribute Key conforming to the "exception.type" ++ // semantic conventions. It represents the type of the exception (its ++ // fully-qualified class name, if applicable). The dynamic type of the ++ // exception should be preferred over the static type in languages that ++ // support it. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'java.net.ConnectException', 'OSError' ++ ExceptionTypeKey = attribute.Key("exception.type") ++ ++ // ExceptionMessageKey is the attribute Key conforming to the ++ // "exception.message" semantic conventions. It represents the exception ++ // message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Division by zero', "Can't convert 'int' object to str ++ // implicitly" ++ ExceptionMessageKey = attribute.Key("exception.message") ++ ++ // ExceptionStacktraceKey is the attribute Key conforming to the ++ // "exception.stacktrace" semantic conventions. It represents a stacktrace ++ // as a string in the natural representation for the language runtime. The ++ // representation is to be determined and documented by each language SIG. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test ++ // exception\\n at ' ++ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' ++ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' ++ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ++ ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ++) ++ ++// ExceptionType returns an attribute KeyValue conforming to the ++// "exception.type" semantic conventions. It represents the type of the ++// exception (its fully-qualified class name, if applicable). The dynamic type ++// of the exception should be preferred over the static type in languages that ++// support it. ++func ExceptionType(val string) attribute.KeyValue { ++ return ExceptionTypeKey.String(val) ++} ++ ++// ExceptionMessage returns an attribute KeyValue conforming to the ++// "exception.message" semantic conventions. It represents the exception ++// message. ++func ExceptionMessage(val string) attribute.KeyValue { ++ return ExceptionMessageKey.String(val) ++} ++ ++// ExceptionStacktrace returns an attribute KeyValue conforming to the ++// "exception.stacktrace" semantic conventions. It represents a stacktrace as a ++// string in the natural representation for the language runtime. The ++// representation is to be determined and documented by each language SIG. ++func ExceptionStacktrace(val string) attribute.KeyValue { ++ return ExceptionStacktraceKey.String(val) ++} ++ ++// Attributes for Events represented using Log Records. ++const ( ++ // EventNameKey is the attribute Key conforming to the "event.name" ++ // semantic conventions. It represents the name identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'click', 'exception' ++ EventNameKey = attribute.Key("event.name") ++ ++ // EventDomainKey is the attribute Key conforming to the "event.domain" ++ // semantic conventions. 
It represents the domain identifies the business ++ // context for the events. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: Events across different domains may have same `event.name`, yet be ++ // unrelated events. ++ EventDomainKey = attribute.Key("event.domain") ++) ++ ++var ( ++ // Events from browser apps ++ EventDomainBrowser = EventDomainKey.String("browser") ++ // Events from mobile apps ++ EventDomainDevice = EventDomainKey.String("device") ++ // Events from Kubernetes ++ EventDomainK8S = EventDomainKey.String("k8s") ++) ++ ++// EventName returns an attribute KeyValue conforming to the "event.name" ++// semantic conventions. It represents the name identifies the event. ++func EventName(val string) attribute.KeyValue { ++ return EventNameKey.String(val) ++} ++ ++// Span attributes used by AWS Lambda (in addition to general `faas` ++// attributes). ++const ( ++ // AWSLambdaInvokedARNKey is the attribute Key conforming to the ++ // "aws.lambda.invoked_arn" semantic conventions. It represents the full ++ // invoked ARN as provided on the `Context` passed to the function ++ // (`Lambda-Runtime-Invoked-Function-ARN` header on the ++ // `/runtime/invocation/next` applicable). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' ++ // Note: This may be different from `faas.id` if an alias is involved. ++ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ++) ++ ++// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the ++// "aws.lambda.invoked_arn" semantic conventions. It represents the full ++// invoked ARN as provided on the `Context` passed to the function ++// (`Lambda-Runtime-Invoked-Function-ARN` header on the ++// `/runtime/invocation/next` applicable). ++func AWSLambdaInvokedARN(val string) attribute.KeyValue { ++ return AWSLambdaInvokedARNKey.String(val) ++} ++ ++// Attributes for CloudEvents. CloudEvents is a specification on how to define ++// event data in a standard way. These attributes can be attached to spans when ++// performing operations with CloudEvents, regardless of the protocol being ++// used. ++const ( ++ // CloudeventsEventIDKey is the attribute Key conforming to the ++ // "cloudevents.event_id" semantic conventions. It represents the ++ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++ // uniquely identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' ++ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") ++ ++ // CloudeventsEventSourceKey is the attribute Key conforming to the ++ // "cloudevents.event_source" semantic conventions. It represents the ++ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++ // identifies the context in which an event happened. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://github.com/cloudevents', ++ // '/cloudevents/spec/pull/123', 'my-service' ++ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") ++ ++ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the ++ // "cloudevents.event_spec_version" semantic conventions. 
It represents the ++ // [version of the CloudEvents ++ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++ // which the event uses. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0' ++ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") ++ ++ // CloudeventsEventTypeKey is the attribute Key conforming to the ++ // "cloudevents.event_type" semantic conventions. It represents the ++ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++ // contains a value describing the type of event related to the originating ++ // occurrence. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.github.pull_request.opened', ++ // 'com.example.object.deleted.v2' ++ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") ++ ++ // CloudeventsEventSubjectKey is the attribute Key conforming to the ++ // "cloudevents.event_subject" semantic conventions. It represents the ++ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++ // of the event in the context of the event producer (identified by ++ // source). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mynewfile.jpg' ++ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ++) ++ ++// CloudeventsEventID returns an attribute KeyValue conforming to the ++// "cloudevents.event_id" semantic conventions. It represents the ++// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++// uniquely identifies the event. ++func CloudeventsEventID(val string) attribute.KeyValue { ++ return CloudeventsEventIDKey.String(val) ++} ++ ++// CloudeventsEventSource returns an attribute KeyValue conforming to the ++// "cloudevents.event_source" semantic conventions. It represents the ++// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++// identifies the context in which an event happened. ++func CloudeventsEventSource(val string) attribute.KeyValue { ++ return CloudeventsEventSourceKey.String(val) ++} ++ ++// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to ++// the "cloudevents.event_spec_version" semantic conventions. It represents the ++// [version of the CloudEvents ++// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++// which the event uses. ++func CloudeventsEventSpecVersion(val string) attribute.KeyValue { ++ return CloudeventsEventSpecVersionKey.String(val) ++} ++ ++// CloudeventsEventType returns an attribute KeyValue conforming to the ++// "cloudevents.event_type" semantic conventions. It represents the ++// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++// contains a value describing the type of event related to the originating ++// occurrence. ++func CloudeventsEventType(val string) attribute.KeyValue { ++ return CloudeventsEventTypeKey.String(val) ++} ++ ++// CloudeventsEventSubject returns an attribute KeyValue conforming to the ++// "cloudevents.event_subject" semantic conventions. It represents the ++// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++// of the event in the context of the event producer (identified by source). 
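A sketch of stamping the CloudEvents span attributes defined above onto a span, assuming the `go.opentelemetry.io/otel` API package; the tracer name, the `publishEvent` helper, and the event values are illustrative only, not part of the vendored code:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// publishEvent is a hypothetical producer-side helper shown only to
// indicate where these attributes would typically be set.
func publishEvent(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "publish event")
	defer span.End()

	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
		semconv.CloudeventsEventType("com.example.object.created.v1"),
	)
}

func main() {
	publishEvent(context.Background())
}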
++func CloudeventsEventSubject(val string) attribute.KeyValue { ++ return CloudeventsEventSubjectKey.String(val) ++} ++ ++// Semantic conventions for the OpenTracing Shim ++const ( ++ // OpentracingRefTypeKey is the attribute Key conforming to the ++ // "opentracing.ref_type" semantic conventions. It represents the ++ // parent-child Reference type ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The causal relationship between a child Span and a parent Span. ++ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ++) ++ ++var ( ++ // The parent Span depends on the child Span in some capacity ++ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") ++ // The parent Span does not depend in any way on the result of the child Span ++ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ++) ++ ++// The attributes used to perform database client calls. ++const ( ++ // DBSystemKey is the attribute Key conforming to the "db.system" semantic ++ // conventions. It represents an identifier for the database management ++ // system (DBMS) product being used. See below for a list of well-known ++ // identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ DBSystemKey = attribute.Key("db.system") ++ ++ // DBConnectionStringKey is the attribute Key conforming to the ++ // "db.connection_string" semantic conventions. It represents the ++ // connection string used to connect to the database. It is recommended to ++ // remove embedded credentials. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' ++ DBConnectionStringKey = attribute.Key("db.connection_string") ++ ++ // DBUserKey is the attribute Key conforming to the "db.user" semantic ++ // conventions. It represents the username for accessing the database. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'readonly_user', 'reporting_user' ++ DBUserKey = attribute.Key("db.user") ++ ++ // DBJDBCDriverClassnameKey is the attribute Key conforming to the ++ // "db.jdbc.driver_classname" semantic conventions. It represents the ++ // fully-qualified class name of the [Java Database Connectivity ++ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) ++ // driver used to connect. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'org.postgresql.Driver', ++ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' ++ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") ++ ++ // DBNameKey is the attribute Key conforming to the "db.name" semantic ++ // conventions. It represents the this attribute is used to report the name ++ // of the database being accessed. For commands that switch the database, ++ // this should be set to the target database (even if the command fails). ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable.) ++ // Stability: stable ++ // Examples: 'customers', 'main' ++ // Note: In some SQL databases, the database name to be used is called ++ // "schema name". In case there are multiple layers that could be ++ // considered for database name (e.g. Oracle instance name and schema ++ // name), the database name to be used is the more specific layer (e.g. ++ // Oracle schema name). 
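For the database client conventions that begin here, a short sketch of how an instrumentation might annotate a query span; `DBSystemPostgreSQL` is one of the enum values declared just below, while the tracer name, span name, database name, and statement text are placeholders for illustration:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// queryCustomers is a hypothetical helper showing where a DB client span
// would carry these attributes.
func queryCustomers(ctx context.Context) {
	_, span := otel.Tracer("example/db").Start(ctx, "SELECT customers")
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemPostgreSQL,              // enum value defined below
		semconv.DBNameKey.String("customers"),   // target database
		semconv.DBOperationKey.String("SELECT"), // SQL keyword
		semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
	)
}

func main() {
	queryCustomers(context.Background())
}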
++ DBNameKey = attribute.Key("db.name") ++ ++ // DBStatementKey is the attribute Key conforming to the "db.statement" ++ // semantic conventions. It represents the database statement being ++ // executed. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable and not ++ // explicitly disabled via instrumentation configuration.) ++ // Stability: stable ++ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' ++ // Note: The value may be sanitized to exclude sensitive information. ++ DBStatementKey = attribute.Key("db.statement") ++ ++ // DBOperationKey is the attribute Key conforming to the "db.operation" ++ // semantic conventions. It represents the name of the operation being ++ // executed, e.g. the [MongoDB command ++ // name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++ // such as `findAndModify`, or the SQL keyword. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If `db.statement` is not ++ // applicable.) ++ // Stability: stable ++ // Examples: 'findAndModify', 'HMSET', 'SELECT' ++ // Note: When setting this to an SQL keyword, it is not recommended to ++ // attempt any client-side parsing of `db.statement` just to get this ++ // property, but it should be set if the operation name is provided by the ++ // library being instrumented. If the SQL statement has an ambiguous ++ // operation, or performs more than one operation, this value may be ++ // omitted. ++ DBOperationKey = attribute.Key("db.operation") ++) ++ ++var ( ++ // Some other SQL database. Fallback only. See notes ++ DBSystemOtherSQL = DBSystemKey.String("other_sql") ++ // Microsoft SQL Server ++ DBSystemMSSQL = DBSystemKey.String("mssql") ++ // MySQL ++ DBSystemMySQL = DBSystemKey.String("mysql") ++ // Oracle Database ++ DBSystemOracle = DBSystemKey.String("oracle") ++ // IBM DB2 ++ DBSystemDB2 = DBSystemKey.String("db2") ++ // PostgreSQL ++ DBSystemPostgreSQL = DBSystemKey.String("postgresql") ++ // Amazon Redshift ++ DBSystemRedshift = DBSystemKey.String("redshift") ++ // Apache Hive ++ DBSystemHive = DBSystemKey.String("hive") ++ // Cloudscape ++ DBSystemCloudscape = DBSystemKey.String("cloudscape") ++ // HyperSQL DataBase ++ DBSystemHSQLDB = DBSystemKey.String("hsqldb") ++ // Progress Database ++ DBSystemProgress = DBSystemKey.String("progress") ++ // SAP MaxDB ++ DBSystemMaxDB = DBSystemKey.String("maxdb") ++ // SAP HANA ++ DBSystemHanaDB = DBSystemKey.String("hanadb") ++ // Ingres ++ DBSystemIngres = DBSystemKey.String("ingres") ++ // FirstSQL ++ DBSystemFirstSQL = DBSystemKey.String("firstsql") ++ // EnterpriseDB ++ DBSystemEDB = DBSystemKey.String("edb") ++ // InterSystems Caché ++ DBSystemCache = DBSystemKey.String("cache") ++ // Adabas (Adaptable Database System) ++ DBSystemAdabas = DBSystemKey.String("adabas") ++ // Firebird ++ DBSystemFirebird = DBSystemKey.String("firebird") ++ // Apache Derby ++ DBSystemDerby = DBSystemKey.String("derby") ++ // FileMaker ++ DBSystemFilemaker = DBSystemKey.String("filemaker") ++ // Informix ++ DBSystemInformix = DBSystemKey.String("informix") ++ // InstantDB ++ DBSystemInstantDB = DBSystemKey.String("instantdb") ++ // InterBase ++ DBSystemInterbase = DBSystemKey.String("interbase") ++ // MariaDB ++ DBSystemMariaDB = DBSystemKey.String("mariadb") ++ // Netezza ++ DBSystemNetezza = DBSystemKey.String("netezza") ++ // Pervasive PSQL ++ DBSystemPervasive = DBSystemKey.String("pervasive") ++ // PointBase ++ DBSystemPointbase = DBSystemKey.String("pointbase") ++ // SQLite ++ DBSystemSqlite 
= DBSystemKey.String("sqlite") ++ // Sybase ++ DBSystemSybase = DBSystemKey.String("sybase") ++ // Teradata ++ DBSystemTeradata = DBSystemKey.String("teradata") ++ // Vertica ++ DBSystemVertica = DBSystemKey.String("vertica") ++ // H2 ++ DBSystemH2 = DBSystemKey.String("h2") ++ // ColdFusion IMQ ++ DBSystemColdfusion = DBSystemKey.String("coldfusion") ++ // Apache Cassandra ++ DBSystemCassandra = DBSystemKey.String("cassandra") ++ // Apache HBase ++ DBSystemHBase = DBSystemKey.String("hbase") ++ // MongoDB ++ DBSystemMongoDB = DBSystemKey.String("mongodb") ++ // Redis ++ DBSystemRedis = DBSystemKey.String("redis") ++ // Couchbase ++ DBSystemCouchbase = DBSystemKey.String("couchbase") ++ // CouchDB ++ DBSystemCouchDB = DBSystemKey.String("couchdb") ++ // Microsoft Azure Cosmos DB ++ DBSystemCosmosDB = DBSystemKey.String("cosmosdb") ++ // Amazon DynamoDB ++ DBSystemDynamoDB = DBSystemKey.String("dynamodb") ++ // Neo4j ++ DBSystemNeo4j = DBSystemKey.String("neo4j") ++ // Apache Geode ++ DBSystemGeode = DBSystemKey.String("geode") ++ // Elasticsearch ++ DBSystemElasticsearch = DBSystemKey.String("elasticsearch") ++ // Memcached ++ DBSystemMemcached = DBSystemKey.String("memcached") ++ // CockroachDB ++ DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ++ // OpenSearch ++ DBSystemOpensearch = DBSystemKey.String("opensearch") ++ // ClickHouse ++ DBSystemClickhouse = DBSystemKey.String("clickhouse") ++) ++ ++// DBConnectionString returns an attribute KeyValue conforming to the ++// "db.connection_string" semantic conventions. It represents the connection ++// string used to connect to the database. It is recommended to remove embedded ++// credentials. ++func DBConnectionString(val string) attribute.KeyValue { ++ return DBConnectionStringKey.String(val) ++} ++ ++// DBUser returns an attribute KeyValue conforming to the "db.user" semantic ++// conventions. It represents the username for accessing the database. ++func DBUser(val string) attribute.KeyValue { ++ return DBUserKey.String(val) ++} ++ ++// DBJDBCDriverClassname returns an attribute KeyValue conforming to the ++// "db.jdbc.driver_classname" semantic conventions. It represents the ++// fully-qualified class name of the [Java Database Connectivity ++// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver ++// used to connect. ++func DBJDBCDriverClassname(val string) attribute.KeyValue { ++ return DBJDBCDriverClassnameKey.String(val) ++} ++ ++// DBName returns an attribute KeyValue conforming to the "db.name" semantic ++// conventions. It represents the this attribute is used to report the name of ++// the database being accessed. For commands that switch the database, this ++// should be set to the target database (even if the command fails). ++func DBName(val string) attribute.KeyValue { ++ return DBNameKey.String(val) ++} ++ ++// DBStatement returns an attribute KeyValue conforming to the ++// "db.statement" semantic conventions. It represents the database statement ++// being executed. ++func DBStatement(val string) attribute.KeyValue { ++ return DBStatementKey.String(val) ++} ++ ++// DBOperation returns an attribute KeyValue conforming to the ++// "db.operation" semantic conventions. It represents the name of the operation ++// being executed, e.g. the [MongoDB command ++// name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++// such as `findAndModify`, or the SQL keyword. 
++func DBOperation(val string) attribute.KeyValue { ++ return DBOperationKey.String(val) ++} ++ ++// Connection-level attributes for Microsoft SQL Server ++const ( ++ // DBMSSQLInstanceNameKey is the attribute Key conforming to the ++ // "db.mssql.instance_name" semantic conventions. It represents the ++ // Microsoft SQL Server [instance ++ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++ // connecting to. This name is used to determine the port of a named ++ // instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MSSQLSERVER' ++ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no ++ // longer required (but still recommended if non-standard). ++ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ++) ++ ++// DBMSSQLInstanceName returns an attribute KeyValue conforming to the ++// "db.mssql.instance_name" semantic conventions. It represents the Microsoft ++// SQL Server [instance ++// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++// connecting to. This name is used to determine the port of a named instance. ++func DBMSSQLInstanceName(val string) attribute.KeyValue { ++ return DBMSSQLInstanceNameKey.String(val) ++} ++ ++// Call-level attributes for Cassandra ++const ( ++ // DBCassandraPageSizeKey is the attribute Key conforming to the ++ // "db.cassandra.page_size" semantic conventions. It represents the fetch ++ // size used for paging, i.e. how many rows will be returned at once. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 5000 ++ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") ++ ++ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the ++ // "db.cassandra.consistency_level" semantic conventions. It represents the ++ // consistency level of the query. Based on consistency values from ++ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") ++ ++ // DBCassandraTableKey is the attribute Key conforming to the ++ // "db.cassandra.table" semantic conventions. It represents the name of the ++ // primary table that the operation is acting upon, including the keyspace ++ // name (if applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'mytable' ++ // Note: This mirrors the db.sql.table attribute but references cassandra ++ // rather than sql. It is not recommended to attempt any client-side ++ // parsing of `db.statement` just to get this property, but it should be ++ // set if it is provided by the library being instrumented. If the ++ // operation is acting upon an anonymous table, or more than one table, ++ // this value MUST NOT be set. ++ DBCassandraTableKey = attribute.Key("db.cassandra.table") ++ ++ // DBCassandraIdempotenceKey is the attribute Key conforming to the ++ // "db.cassandra.idempotence" semantic conventions. It represents the ++ // whether or not the query is idempotent. 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") ++ ++ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming ++ // to the "db.cassandra.speculative_execution_count" semantic conventions. ++ // It represents the number of times a query was speculatively executed. ++ // Not set or `0` if the query was not executed speculatively. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") ++ ++ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID ++ // of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' ++ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") ++ ++ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.dc" semantic conventions. It represents the ++ // data center of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-west-2' ++ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ++) ++ ++var ( ++ // all ++ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") ++ // each_quorum ++ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") ++ // quorum ++ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") ++ // local_quorum ++ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") ++ // one ++ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") ++ // two ++ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") ++ // three ++ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") ++ // local_one ++ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") ++ // any ++ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") ++ // serial ++ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") ++ // local_serial ++ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ++) ++ ++// DBCassandraPageSize returns an attribute KeyValue conforming to the ++// "db.cassandra.page_size" semantic conventions. It represents the fetch size ++// used for paging, i.e. how many rows will be returned at once. ++func DBCassandraPageSize(val int) attribute.KeyValue { ++ return DBCassandraPageSizeKey.Int(val) ++} ++ ++// DBCassandraTable returns an attribute KeyValue conforming to the ++// "db.cassandra.table" semantic conventions. It represents the name of the ++// primary table that the operation is acting upon, including the keyspace name ++// (if applicable). ++func DBCassandraTable(val string) attribute.KeyValue { ++ return DBCassandraTableKey.String(val) ++} ++ ++// DBCassandraIdempotence returns an attribute KeyValue conforming to the ++// "db.cassandra.idempotence" semantic conventions. It represents the whether ++// or not the query is idempotent. 
++func DBCassandraIdempotence(val bool) attribute.KeyValue { ++ return DBCassandraIdempotenceKey.Bool(val) ++} ++ ++// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue ++// conforming to the "db.cassandra.speculative_execution_count" semantic ++// conventions. It represents the number of times a query was speculatively ++// executed. Not set or `0` if the query was not executed speculatively. ++func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { ++ return DBCassandraSpeculativeExecutionCountKey.Int(val) ++} ++ ++// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of ++// the coordinating node for a query. ++func DBCassandraCoordinatorID(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorIDKey.String(val) ++} ++ ++// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.dc" semantic conventions. It represents the data ++// center of the coordinating node for a query. ++func DBCassandraCoordinatorDC(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorDCKey.String(val) ++} ++ ++// Call-level attributes for Redis ++const ( ++ // DBRedisDBIndexKey is the attribute Key conforming to the ++ // "db.redis.database_index" semantic conventions. It represents the index ++ // of the database being accessed as used in the [`SELECT` ++ // command](https://redis.io/commands/select), provided as an integer. To ++ // be used instead of the generic `db.name` attribute. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // database (`0`).) ++ // Stability: stable ++ // Examples: 0, 1, 15 ++ DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ++) ++ ++// DBRedisDBIndex returns an attribute KeyValue conforming to the ++// "db.redis.database_index" semantic conventions. It represents the index of ++// the database being accessed as used in the [`SELECT` ++// command](https://redis.io/commands/select), provided as an integer. To be ++// used instead of the generic `db.name` attribute. ++func DBRedisDBIndex(val int) attribute.KeyValue { ++ return DBRedisDBIndexKey.Int(val) ++} ++ ++// Call-level attributes for MongoDB ++const ( ++ // DBMongoDBCollectionKey is the attribute Key conforming to the ++ // "db.mongodb.collection" semantic conventions. It represents the ++ // collection being accessed within the database stated in `db.name`. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'customers', 'products' ++ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ++) ++ ++// DBMongoDBCollection returns an attribute KeyValue conforming to the ++// "db.mongodb.collection" semantic conventions. It represents the collection ++// being accessed within the database stated in `db.name`. ++func DBMongoDBCollection(val string) attribute.KeyValue { ++ return DBMongoDBCollectionKey.String(val) ++} ++ ++// Call-level attributes for SQL databases ++const ( ++ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" ++ // semantic conventions. It represents the name of the primary table that ++ // the operation is acting upon, including the database name (if ++ // applicable). 
++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'public.users', 'customers' ++ // Note: It is not recommended to attempt any client-side parsing of ++ // `db.statement` just to get this property, but it should be set if it is ++ // provided by the library being instrumented. If the operation is acting ++ // upon an anonymous table, or more than one table, this value MUST NOT be ++ // set. ++ DBSQLTableKey = attribute.Key("db.sql.table") ++) ++ ++// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" ++// semantic conventions. It represents the name of the primary table that the ++// operation is acting upon, including the database name (if applicable). ++func DBSQLTable(val string) attribute.KeyValue { ++ return DBSQLTableKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's ++// concepts. ++const ( ++ // OtelStatusCodeKey is the attribute Key conforming to the ++ // "otel.status_code" semantic conventions. It represents the name of the ++ // code, either "OK" or "ERROR". MUST NOT be set if the status code is ++ // UNSET. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ OtelStatusCodeKey = attribute.Key("otel.status_code") ++ ++ // OtelStatusDescriptionKey is the attribute Key conforming to the ++ // "otel.status_description" semantic conventions. It represents the ++ // description of the Status if it has a value, otherwise not set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'resource not found' ++ OtelStatusDescriptionKey = attribute.Key("otel.status_description") ++) ++ ++var ( ++ // The operation has been validated by an Application developer or Operator to have completed successfully ++ OtelStatusCodeOk = OtelStatusCodeKey.String("OK") ++ // The operation contains an error ++ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ++) ++ ++// OtelStatusDescription returns an attribute KeyValue conforming to the ++// "otel.status_description" semantic conventions. It represents the ++// description of the Status if it has a value, otherwise not set. ++func OtelStatusDescription(val string) attribute.KeyValue { ++ return OtelStatusDescriptionKey.String(val) ++} ++ ++// This semantic convention describes an instance of a function that runs ++// without provisioning or managing of servers (also known as serverless ++// functions or Function as a Service (FaaS)) with spans. ++const ( ++ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" ++ // semantic conventions. It represents the type of the trigger which caused ++ // this function execution. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: For the server/consumer span on the incoming side, ++ // `faas.trigger` MUST be set. ++ // ++ // Clients invoking FaaS instances usually cannot set `faas.trigger`, ++ // since they would typically need to look in the payload to determine ++ // the event type. If clients set it, it should be the same as the ++ // trigger that corresponding incoming would have (i.e., this has ++ // nothing to do with the underlying transport used to make the API ++ // call to invoke the lambda, which is often HTTP). ++ FaaSTriggerKey = attribute.Key("faas.trigger") ++ ++ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" ++ // semantic conventions. It represents the execution ID of the current ++ // function execution. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' ++ FaaSExecutionKey = attribute.Key("faas.execution") ++) ++ ++var ( ++ // A response to some data source operation such as a database or filesystem read/write ++ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") ++ // To provide an answer to an inbound HTTP request ++ FaaSTriggerHTTP = FaaSTriggerKey.String("http") ++ // A function is set to be executed when messages are sent to a messaging system ++ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") ++ // A function is scheduled to be executed regularly ++ FaaSTriggerTimer = FaaSTriggerKey.String("timer") ++ // If none of the others apply ++ FaaSTriggerOther = FaaSTriggerKey.String("other") ++) ++ ++// FaaSExecution returns an attribute KeyValue conforming to the ++// "faas.execution" semantic conventions. It represents the execution ID of the ++// current function execution. ++func FaaSExecution(val string) attribute.KeyValue { ++ return FaaSExecutionKey.String(val) ++} ++ ++// Semantic Convention for FaaS triggered as a response to some data source ++// operation such as a database or filesystem read/write. ++const ( ++ // FaaSDocumentCollectionKey is the attribute Key conforming to the ++ // "faas.document.collection" semantic conventions. It represents the name ++ // of the source on which the triggering operation was performed. For ++ // example, in Cloud Storage or S3 corresponds to the bucket name, and in ++ // Cosmos DB to the database name. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myBucketName', 'myDBName' ++ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") ++ ++ // FaaSDocumentOperationKey is the attribute Key conforming to the ++ // "faas.document.operation" semantic conventions. It represents the ++ // describes the type of the operation that was performed on the data. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ FaaSDocumentOperationKey = attribute.Key("faas.document.operation") ++ ++ // FaaSDocumentTimeKey is the attribute Key conforming to the ++ // "faas.document.time" semantic conventions. It represents a string ++ // containing the time when the data was accessed in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSDocumentTimeKey = attribute.Key("faas.document.time") ++ ++ // FaaSDocumentNameKey is the attribute Key conforming to the ++ // "faas.document.name" semantic conventions. It represents the document ++ // name/table subjected to the operation. For example, in Cloud Storage or ++ // S3 is the name of the file, and in Cosmos DB the table name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myFile.txt', 'myTableName' ++ FaaSDocumentNameKey = attribute.Key("faas.document.name") ++) ++ ++var ( ++ // When a new object is created ++ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") ++ // When an object is modified ++ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") ++ // When an object is deleted ++ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ++) ++ ++// FaaSDocumentCollection returns an attribute KeyValue conforming to the ++// "faas.document.collection" semantic conventions. It represents the name of ++// the source on which the triggering operation was performed. For example, in ++// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the ++// database name. ++func FaaSDocumentCollection(val string) attribute.KeyValue { ++ return FaaSDocumentCollectionKey.String(val) ++} ++ ++// FaaSDocumentTime returns an attribute KeyValue conforming to the ++// "faas.document.time" semantic conventions. It represents a string containing ++// the time when the data was accessed in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSDocumentTime(val string) attribute.KeyValue { ++ return FaaSDocumentTimeKey.String(val) ++} ++ ++// FaaSDocumentName returns an attribute KeyValue conforming to the ++// "faas.document.name" semantic conventions. It represents the document ++// name/table subjected to the operation. For example, in Cloud Storage or S3 ++// is the name of the file, and in Cosmos DB the table name. ++func FaaSDocumentName(val string) attribute.KeyValue { ++ return FaaSDocumentNameKey.String(val) ++} ++ ++// Semantic Convention for FaaS scheduled to be executed regularly. ++const ( ++ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic ++ // conventions. It represents a string containing the function invocation ++ // time in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSTimeKey = attribute.Key("faas.time") ++ ++ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic ++ // conventions. It represents a string containing the schedule period as ++ // [Cron ++ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0/5 * * * ? *' ++ FaaSCronKey = attribute.Key("faas.cron") ++) ++ ++// FaaSTime returns an attribute KeyValue conforming to the "faas.time" ++// semantic conventions. It represents a string containing the function ++// invocation time in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSTime(val string) attribute.KeyValue { ++ return FaaSTimeKey.String(val) ++} ++ ++// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" ++// semantic conventions. It represents a string containing the schedule period ++// as [Cron ++// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). 
++func FaaSCron(val string) attribute.KeyValue { ++ return FaaSCronKey.String(val) ++} ++ ++// Contains additional attributes for incoming FaaS spans. ++const ( ++ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" ++ // semantic conventions. It represents a boolean that is true if the ++ // serverless function is executed for the first time (aka cold-start). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ FaaSColdstartKey = attribute.Key("faas.coldstart") ++) ++ ++// FaaSColdstart returns an attribute KeyValue conforming to the ++// "faas.coldstart" semantic conventions. It represents a boolean that is true ++// if the serverless function is executed for the first time (aka cold-start). ++func FaaSColdstart(val bool) attribute.KeyValue { ++ return FaaSColdstartKey.Bool(val) ++} ++ ++// Contains additional attributes for outgoing FaaS spans. ++const ( ++ // FaaSInvokedNameKey is the attribute Key conforming to the ++ // "faas.invoked_name" semantic conventions. It represents the name of the ++ // invoked function. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function' ++ // Note: SHOULD be equal to the `faas.name` resource attribute of the ++ // invoked function. ++ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") ++ ++ // FaaSInvokedProviderKey is the attribute Key conforming to the ++ // "faas.invoked_provider" semantic conventions. It represents the cloud ++ // provider of the invoked function. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the ++ // invoked function. ++ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") ++ ++ // FaaSInvokedRegionKey is the attribute Key conforming to the ++ // "faas.invoked_region" semantic conventions. It represents the cloud ++ // region of the invoked function. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (For some cloud providers, like ++ // AWS or GCP, the region in which a function is hosted is essential to ++ // uniquely identify the function and also part of its endpoint. Since it's ++ // part of the endpoint being called, the region is always known to ++ // clients. In these cases, `faas.invoked_region` MUST be set accordingly. ++ // If the region is unknown to the client or not required for identifying ++ // the invoked function, setting `faas.invoked_region` is optional.) ++ // Stability: stable ++ // Examples: 'eu-central-1' ++ // Note: SHOULD be equal to the `cloud.region` resource attribute of the ++ // invoked function. ++ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ++) ++ ++var ( ++ // Alibaba Cloud ++ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") ++ // Microsoft Azure ++ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") ++ // Google Cloud Platform ++ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ++ // Tencent Cloud ++ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ++) ++ ++// FaaSInvokedName returns an attribute KeyValue conforming to the ++// "faas.invoked_name" semantic conventions. It represents the name of the ++// invoked function. 
++func FaaSInvokedName(val string) attribute.KeyValue { ++ return FaaSInvokedNameKey.String(val) ++} ++ ++// FaaSInvokedRegion returns an attribute KeyValue conforming to the ++// "faas.invoked_region" semantic conventions. It represents the cloud region ++// of the invoked function. ++func FaaSInvokedRegion(val string) attribute.KeyValue { ++ return FaaSInvokedRegionKey.String(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetTransportKey is the attribute Key conforming to the "net.transport" ++ // semantic conventions. It represents the transport protocol used. See ++ // note below. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ NetTransportKey = attribute.Key("net.transport") ++ ++ // NetAppProtocolNameKey is the attribute Key conforming to the ++ // "net.app.protocol.name" semantic conventions. It represents the ++ // application layer protocol used. The value SHOULD be normalized to ++ // lowercase. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") ++ ++ // NetAppProtocolVersionKey is the attribute Key conforming to the ++ // "net.app.protocol.version" semantic conventions. It represents the ++ // version of the application layer protocol used. See note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3.1.1' ++ // Note: `net.app.protocol.version` refers to the version of the protocol ++ // used and might be different from the protocol client's version. If the ++ // HTTP client used has a version of `0.27.2`, but sends HTTP version ++ // `1.1`, this attribute should be set to `1.1`. ++ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") ++ ++ // NetSockPeerNameKey is the attribute Key conforming to the ++ // "net.sock.peer.name" semantic conventions. It represents the remote ++ // socket peer name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If available and different from ++ // `net.peer.name` and if `net.sock.peer.addr` is set.) ++ // Stability: stable ++ // Examples: 'proxy.example.com' ++ NetSockPeerNameKey = attribute.Key("net.sock.peer.name") ++ ++ // NetSockPeerAddrKey is the attribute Key conforming to the ++ // "net.sock.peer.addr" semantic conventions. It represents the remote ++ // socket peer address: IPv4 or IPv6 for internet protocols, path for local ++ // communication, ++ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '127.0.0.1', '/tmp/mysql.sock' ++ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") ++ ++ // NetSockPeerPortKey is the attribute Key conforming to the ++ // "net.sock.peer.port" semantic conventions. It represents the remote ++ // socket peer port. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If defined for the address family and if ++ // different than `net.peer.port` and if `net.sock.peer.addr` is set.) ++ // Stability: stable ++ // Examples: 16456 ++ NetSockPeerPortKey = attribute.Key("net.sock.peer.port") ++ ++ // NetSockFamilyKey is the attribute Key conforming to the ++ // "net.sock.family" semantic conventions. It represents the protocol ++ // [address ++ // family](https://man7.org/linux/man-pages/man7/address_families.7.html) ++ // which is used for communication. 
++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (If different than `inet` and if ++ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers ++ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in ++ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support ++ // instrumentations that follow previous versions of this document.) ++ // Stability: stable ++ // Examples: 'inet6', 'bluetooth' ++ NetSockFamilyKey = attribute.Key("net.sock.family") ++ ++ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" ++ // semantic conventions. It represents the logical remote hostname, see ++ // note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'example.com' ++ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an ++ // extra DNS lookup. ++ NetPeerNameKey = attribute.Key("net.peer.name") ++ ++ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" ++ // semantic conventions. It represents the logical remote port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 80, 8080, 443 ++ NetPeerPortKey = attribute.Key("net.peer.port") ++ ++ // NetHostNameKey is the attribute Key conforming to the "net.host.name" ++ // semantic conventions. It represents the logical local hostname or ++ // similar, see note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'localhost' ++ NetHostNameKey = attribute.Key("net.host.name") ++ ++ // NetHostPortKey is the attribute Key conforming to the "net.host.port" ++ // semantic conventions. It represents the logical local port number, ++ // preferably the one that the peer used to connect ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 8080 ++ NetHostPortKey = attribute.Key("net.host.port") ++ ++ // NetSockHostAddrKey is the attribute Key conforming to the ++ // "net.sock.host.addr" semantic conventions. It represents the local ++ // socket address. Useful in case of a multi-IP host. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '192.168.0.1' ++ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") ++ ++ // NetSockHostPortKey is the attribute Key conforming to the ++ // "net.sock.host.port" semantic conventions. It represents the local ++ // socket port number. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If defined for the address family and if ++ // different than `net.host.port` and if `net.sock.host.addr` is set.) ++ // Stability: stable ++ // Examples: 35555 ++ NetSockHostPortKey = attribute.Key("net.sock.host.port") ++ ++ // NetHostConnectionTypeKey is the attribute Key conforming to the ++ // "net.host.connection.type" semantic conventions. It represents the ++ // internet connection type currently being used by the host. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'wifi' ++ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") ++ ++ // NetHostConnectionSubtypeKey is the attribute Key conforming to the ++ // "net.host.connection.subtype" semantic conventions. It represents the ++ // this describes more details regarding the connection.type. It may be the ++ // type of cell technology connection, but it could be used for describing ++ // details about a wifi connection. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'LTE' ++ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") ++ ++ // NetHostCarrierNameKey is the attribute Key conforming to the ++ // "net.host.carrier.name" semantic conventions. It represents the name of ++ // the mobile carrier. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'sprint' ++ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") ++ ++ // NetHostCarrierMccKey is the attribute Key conforming to the ++ // "net.host.carrier.mcc" semantic conventions. It represents the mobile ++ // carrier country code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '310' ++ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") ++ ++ // NetHostCarrierMncKey is the attribute Key conforming to the ++ // "net.host.carrier.mnc" semantic conventions. It represents the mobile ++ // carrier network code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '001' ++ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") ++ ++ // NetHostCarrierIccKey is the attribute Key conforming to the ++ // "net.host.carrier.icc" semantic conventions. It represents the ISO ++ // 3166-1 alpha-2 2-character country code associated with the mobile ++ // carrier network. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'DE' ++ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ++) ++ ++var ( ++ // ip_tcp ++ NetTransportTCP = NetTransportKey.String("ip_tcp") ++ // ip_udp ++ NetTransportUDP = NetTransportKey.String("ip_udp") ++ // Named or anonymous pipe. See note below ++ NetTransportPipe = NetTransportKey.String("pipe") ++ // In-process communication ++ NetTransportInProc = NetTransportKey.String("inproc") ++ // Something else (non IP-based) ++ NetTransportOther = NetTransportKey.String("other") ++) ++ ++var ( ++ // IPv4 address ++ NetSockFamilyInet = NetSockFamilyKey.String("inet") ++ // IPv6 address ++ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") ++ // Unix domain socket path ++ NetSockFamilyUnix = NetSockFamilyKey.String("unix") ++) ++ ++var ( ++ // wifi ++ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") ++ // wired ++ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") ++ // cell ++ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") ++ // unavailable ++ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") ++ // unknown ++ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ++) ++ ++var ( ++ // GPRS ++ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") ++ // EDGE ++ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") ++ // UMTS ++ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") ++ // CDMA ++ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") ++ // EVDO Rel. 0 ++ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") ++ // EVDO Rev. 
A ++ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") ++ // CDMA2000 1XRTT ++ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") ++ // HSDPA ++ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") ++ // HSUPA ++ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") ++ // HSPA ++ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") ++ // IDEN ++ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") ++ // EVDO Rev. B ++ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") ++ // LTE ++ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") ++ // EHRPD ++ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") ++ // HSPAP ++ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") ++ // GSM ++ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") ++ // TD-SCDMA ++ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") ++ // IWLAN ++ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") ++ // 5G NR (New Radio) ++ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") ++ // 5G NRNSA (New Radio Non-Standalone) ++ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") ++ // LTE CA ++ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ++) ++ ++// NetAppProtocolName returns an attribute KeyValue conforming to the ++// "net.app.protocol.name" semantic conventions. It represents the application ++// layer protocol used. The value SHOULD be normalized to lowercase. ++func NetAppProtocolName(val string) attribute.KeyValue { ++ return NetAppProtocolNameKey.String(val) ++} ++ ++// NetAppProtocolVersion returns an attribute KeyValue conforming to the ++// "net.app.protocol.version" semantic conventions. It represents the version ++// of the application layer protocol used. See note below. ++func NetAppProtocolVersion(val string) attribute.KeyValue { ++ return NetAppProtocolVersionKey.String(val) ++} ++ ++// NetSockPeerName returns an attribute KeyValue conforming to the ++// "net.sock.peer.name" semantic conventions. It represents the remote socket ++// peer name. ++func NetSockPeerName(val string) attribute.KeyValue { ++ return NetSockPeerNameKey.String(val) ++} ++ ++// NetSockPeerAddr returns an attribute KeyValue conforming to the ++// "net.sock.peer.addr" semantic conventions. It represents the remote socket ++// peer address: IPv4 or IPv6 for internet protocols, path for local ++// communication, ++// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). ++func NetSockPeerAddr(val string) attribute.KeyValue { ++ return NetSockPeerAddrKey.String(val) ++} ++ ++// NetSockPeerPort returns an attribute KeyValue conforming to the ++// "net.sock.peer.port" semantic conventions. It represents the remote socket ++// peer port. ++func NetSockPeerPort(val int) attribute.KeyValue { ++ return NetSockPeerPortKey.Int(val) ++} ++ ++// NetPeerName returns an attribute KeyValue conforming to the ++// "net.peer.name" semantic conventions. It represents the logical remote ++// hostname, see note below. ++func NetPeerName(val string) attribute.KeyValue { ++ return NetPeerNameKey.String(val) ++} ++ ++// NetPeerPort returns an attribute KeyValue conforming to the ++// "net.peer.port" semantic conventions. 
It represents the logical remote port ++// number ++func NetPeerPort(val int) attribute.KeyValue { ++ return NetPeerPortKey.Int(val) ++} ++ ++// NetHostName returns an attribute KeyValue conforming to the ++// "net.host.name" semantic conventions. It represents the logical local ++// hostname or similar, see note below. ++func NetHostName(val string) attribute.KeyValue { ++ return NetHostNameKey.String(val) ++} ++ ++// NetHostPort returns an attribute KeyValue conforming to the ++// "net.host.port" semantic conventions. It represents the logical local port ++// number, preferably the one that the peer used to connect ++func NetHostPort(val int) attribute.KeyValue { ++ return NetHostPortKey.Int(val) ++} ++ ++// NetSockHostAddr returns an attribute KeyValue conforming to the ++// "net.sock.host.addr" semantic conventions. It represents the local socket ++// address. Useful in case of a multi-IP host. ++func NetSockHostAddr(val string) attribute.KeyValue { ++ return NetSockHostAddrKey.String(val) ++} ++ ++// NetSockHostPort returns an attribute KeyValue conforming to the ++// "net.sock.host.port" semantic conventions. It represents the local socket ++// port number. ++func NetSockHostPort(val int) attribute.KeyValue { ++ return NetSockHostPortKey.Int(val) ++} ++ ++// NetHostCarrierName returns an attribute KeyValue conforming to the ++// "net.host.carrier.name" semantic conventions. It represents the name of the ++// mobile carrier. ++func NetHostCarrierName(val string) attribute.KeyValue { ++ return NetHostCarrierNameKey.String(val) ++} ++ ++// NetHostCarrierMcc returns an attribute KeyValue conforming to the ++// "net.host.carrier.mcc" semantic conventions. It represents the mobile ++// carrier country code. ++func NetHostCarrierMcc(val string) attribute.KeyValue { ++ return NetHostCarrierMccKey.String(val) ++} ++ ++// NetHostCarrierMnc returns an attribute KeyValue conforming to the ++// "net.host.carrier.mnc" semantic conventions. It represents the mobile ++// carrier network code. ++func NetHostCarrierMnc(val string) attribute.KeyValue { ++ return NetHostCarrierMncKey.String(val) ++} ++ ++// NetHostCarrierIcc returns an attribute KeyValue conforming to the ++// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++// alpha-2 2-character country code associated with the mobile carrier network. ++func NetHostCarrierIcc(val string) attribute.KeyValue { ++ return NetHostCarrierIccKey.String(val) ++} ++ ++// Operations that access some remote service. ++const ( ++ // PeerServiceKey is the attribute Key conforming to the "peer.service" ++ // semantic conventions. It represents the ++ // [`service.name`](../../resource/semantic_conventions/README.md#service) ++ // of the remote service. SHOULD be equal to the actual `service.name` ++ // resource attribute of the remote service if any. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'AuthTokenCache' ++ PeerServiceKey = attribute.Key("peer.service") ++) ++ ++// PeerService returns an attribute KeyValue conforming to the ++// "peer.service" semantic conventions. It represents the ++// [`service.name`](../../resource/semantic_conventions/README.md#service) of ++// the remote service. SHOULD be equal to the actual `service.name` resource ++// attribute of the remote service if any. 
++func PeerService(val string) attribute.KeyValue { ++ return PeerServiceKey.String(val) ++} ++ ++// These attributes may be used for any operation with an authenticated and/or ++// authorized enduser. ++const ( ++ // EnduserIDKey is the attribute Key conforming to the "enduser.id" ++ // semantic conventions. It represents the username or client_id extracted ++ // from the access token or ++ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header ++ // in the inbound request from outside the system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'username' ++ EnduserIDKey = attribute.Key("enduser.id") ++ ++ // EnduserRoleKey is the attribute Key conforming to the "enduser.role" ++ // semantic conventions. It represents the actual/assumed role the client ++ // is making the request under extracted from token or application security ++ // context. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'admin' ++ EnduserRoleKey = attribute.Key("enduser.role") ++ ++ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" ++ // semantic conventions. It represents the scopes or granted authorities ++ // the client currently possesses extracted from token or application ++ // security context. The value would come from the scope associated with an ++ // [OAuth 2.0 Access ++ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++ // value in a [SAML 2.0 ++ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'read:message, write:files' ++ EnduserScopeKey = attribute.Key("enduser.scope") ++) ++ ++// EnduserID returns an attribute KeyValue conforming to the "enduser.id" ++// semantic conventions. It represents the username or client_id extracted from ++// the access token or ++// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in ++// the inbound request from outside the system. ++func EnduserID(val string) attribute.KeyValue { ++ return EnduserIDKey.String(val) ++} ++ ++// EnduserRole returns an attribute KeyValue conforming to the ++// "enduser.role" semantic conventions. It represents the actual/assumed role ++// the client is making the request under extracted from token or application ++// security context. ++func EnduserRole(val string) attribute.KeyValue { ++ return EnduserRoleKey.String(val) ++} ++ ++// EnduserScope returns an attribute KeyValue conforming to the ++// "enduser.scope" semantic conventions. It represents the scopes or granted ++// authorities the client currently possesses extracted from token or ++// application security context. The value would come from the scope associated ++// with an [OAuth 2.0 Access ++// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++// value in a [SAML 2.0 ++// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++func EnduserScope(val string) attribute.KeyValue { ++ return EnduserScopeKey.String(val) ++} ++ ++// These attributes may be used for any operation to store information about a ++// thread that started a span. ++const ( ++ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic ++ // conventions. It represents the current "managed" thread ID (as opposed ++ // to OS thread ID). 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ ThreadIDKey = attribute.Key("thread.id") ++ ++ // ThreadNameKey is the attribute Key conforming to the "thread.name" ++ // semantic conventions. It represents the current thread name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'main' ++ ThreadNameKey = attribute.Key("thread.name") ++) ++ ++// ThreadID returns an attribute KeyValue conforming to the "thread.id" ++// semantic conventions. It represents the current "managed" thread ID (as ++// opposed to OS thread ID). ++func ThreadID(val int) attribute.KeyValue { ++ return ThreadIDKey.Int(val) ++} ++ ++// ThreadName returns an attribute KeyValue conforming to the "thread.name" ++// semantic conventions. It represents the current thread name. ++func ThreadName(val string) attribute.KeyValue { ++ return ThreadNameKey.String(val) ++} ++ ++// These attributes allow to report this unit of code and therefore to provide ++// more context about the span. ++const ( ++ // CodeFunctionKey is the attribute Key conforming to the "code.function" ++ // semantic conventions. It represents the method or function name, or ++ // equivalent (usually rightmost part of the code unit's name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'serveRequest' ++ CodeFunctionKey = attribute.Key("code.function") ++ ++ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" ++ // semantic conventions. It represents the "namespace" within which ++ // `code.function` is defined. Usually the qualified class or module name, ++ // such that `code.namespace` + some separator + `code.function` form a ++ // unique identifier for the code unit. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.example.MyHTTPService' ++ CodeNamespaceKey = attribute.Key("code.namespace") ++ ++ // CodeFilepathKey is the attribute Key conforming to the "code.filepath" ++ // semantic conventions. It represents the source code file name that ++ // identifies the code unit as uniquely as possible (preferably an absolute ++ // file path). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/usr/local/MyApplication/content_root/app/index.php' ++ CodeFilepathKey = attribute.Key("code.filepath") ++ ++ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" ++ // semantic conventions. It represents the line number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ CodeLineNumberKey = attribute.Key("code.lineno") ++ ++ // CodeColumnKey is the attribute Key conforming to the "code.column" ++ // semantic conventions. It represents the column number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 16 ++ CodeColumnKey = attribute.Key("code.column") ++) ++ ++// CodeFunction returns an attribute KeyValue conforming to the ++// "code.function" semantic conventions. It represents the method or function ++// name, or equivalent (usually rightmost part of the code unit's name). 
++func CodeFunction(val string) attribute.KeyValue { ++ return CodeFunctionKey.String(val) ++} ++ ++// CodeNamespace returns an attribute KeyValue conforming to the ++// "code.namespace" semantic conventions. It represents the "namespace" within ++// which `code.function` is defined. Usually the qualified class or module ++// name, such that `code.namespace` + some separator + `code.function` form a ++// unique identifier for the code unit. ++func CodeNamespace(val string) attribute.KeyValue { ++ return CodeNamespaceKey.String(val) ++} ++ ++// CodeFilepath returns an attribute KeyValue conforming to the ++// "code.filepath" semantic conventions. It represents the source code file ++// name that identifies the code unit as uniquely as possible (preferably an ++// absolute file path). ++func CodeFilepath(val string) attribute.KeyValue { ++ return CodeFilepathKey.String(val) ++} ++ ++// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" ++// semantic conventions. It represents the line number in `code.filepath` best ++// representing the operation. It SHOULD point within the code unit named in ++// `code.function`. ++func CodeLineNumber(val int) attribute.KeyValue { ++ return CodeLineNumberKey.Int(val) ++} ++ ++// CodeColumn returns an attribute KeyValue conforming to the "code.column" ++// semantic conventions. It represents the column number in `code.filepath` ++// best representing the operation. It SHOULD point within the code unit named ++// in `code.function`. ++func CodeColumn(val int) attribute.KeyValue { ++ return CodeColumnKey.Int(val) ++} ++ ++// Semantic conventions for HTTP client and server Spans. ++const ( ++ // HTTPMethodKey is the attribute Key conforming to the "http.method" ++ // semantic conventions. It represents the hTTP request method. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'GET', 'POST', 'HEAD' ++ HTTPMethodKey = attribute.Key("http.method") ++ ++ // HTTPStatusCodeKey is the attribute Key conforming to the ++ // "http.status_code" semantic conventions. It represents the [HTTP ++ // response status code](https://tools.ietf.org/html/rfc7231#section-6). ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If and only if one was ++ // received/sent.) ++ // Stability: stable ++ // Examples: 200 ++ HTTPStatusCodeKey = attribute.Key("http.status_code") ++ ++ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" ++ // semantic conventions. It represents the kind of HTTP protocol used. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: If `net.transport` is not specified, it can be assumed to be ++ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is ++ // assumed. ++ HTTPFlavorKey = attribute.Key("http.flavor") ++ ++ // HTTPUserAgentKey is the attribute Key conforming to the ++ // "http.user_agent" semantic conventions. It represents the value of the ++ // [HTTP ++ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++ // header sent by the client. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' ++ HTTPUserAgentKey = attribute.Key("http.user_agent") ++ ++ // HTTPRequestContentLengthKey is the attribute Key conforming to the ++ // "http.request_content_length" semantic conventions. It represents the ++ // size of the request payload body in bytes. 
This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") ++ ++ // HTTPResponseContentLengthKey is the attribute Key conforming to the ++ // "http.response_content_length" semantic conventions. It represents the ++ // size of the response payload body in bytes. This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ++) ++ ++var ( ++ // HTTP/1.0 ++ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") ++ // HTTP/1.1 ++ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") ++ // HTTP/2 ++ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") ++ // HTTP/3 ++ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") ++ // SPDY protocol ++ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") ++ // QUIC protocol ++ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ++) ++ ++// HTTPMethod returns an attribute KeyValue conforming to the "http.method" ++// semantic conventions. It represents the hTTP request method. ++func HTTPMethod(val string) attribute.KeyValue { ++ return HTTPMethodKey.String(val) ++} ++ ++// HTTPStatusCode returns an attribute KeyValue conforming to the ++// "http.status_code" semantic conventions. It represents the [HTTP response ++// status code](https://tools.ietf.org/html/rfc7231#section-6). ++func HTTPStatusCode(val int) attribute.KeyValue { ++ return HTTPStatusCodeKey.Int(val) ++} ++ ++// HTTPUserAgent returns an attribute KeyValue conforming to the ++// "http.user_agent" semantic conventions. It represents the value of the [HTTP ++// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++// header sent by the client. ++func HTTPUserAgent(val string) attribute.KeyValue { ++ return HTTPUserAgentKey.String(val) ++} ++ ++// HTTPRequestContentLength returns an attribute KeyValue conforming to the ++// "http.request_content_length" semantic conventions. It represents the size ++// of the request payload body in bytes. This is the number of bytes ++// transferred excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPRequestContentLength(val int) attribute.KeyValue { ++ return HTTPRequestContentLengthKey.Int(val) ++} ++ ++// HTTPResponseContentLength returns an attribute KeyValue conforming to the ++// "http.response_content_length" semantic conventions. It represents the size ++// of the response payload body in bytes. This is the number of bytes ++// transferred excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. 
For requests using transport encoding, this should be the compressed ++// size. ++func HTTPResponseContentLength(val int) attribute.KeyValue { ++ return HTTPResponseContentLengthKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Client ++const ( ++ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic ++ // conventions. It represents the full HTTP request URL in the form ++ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is ++ // not transmitted over HTTP, but if it is known, it should be included ++ // nevertheless. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' ++ // Note: `http.url` MUST NOT contain credentials passed via URL in form of ++ // `https://username:password@www.example.com/`. In such case the ++ // attribute's value should be `https://www.example.com/`. ++ HTTPURLKey = attribute.Key("http.url") ++ ++ // HTTPResendCountKey is the attribute Key conforming to the ++ // "http.resend_count" semantic conventions. It represents the ordinal ++ // number of request resending attempt (for any reason, including ++ // redirects). ++ // ++ // Type: int ++ // RequirementLevel: Recommended (if and only if request was retried.) ++ // Stability: stable ++ // Examples: 3 ++ // Note: The resend count SHOULD be updated each time an HTTP request gets ++ // resent by the client, regardless of what was the cause of the resending ++ // (e.g. redirection, authorization failure, 503 Server Unavailable, ++ // network issues, or any other). ++ HTTPResendCountKey = attribute.Key("http.resend_count") ++) ++ ++// HTTPURL returns an attribute KeyValue conforming to the "http.url" ++// semantic conventions. It represents the full HTTP request URL in the form ++// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not ++// transmitted over HTTP, but if it is known, it should be included ++// nevertheless. ++func HTTPURL(val string) attribute.KeyValue { ++ return HTTPURLKey.String(val) ++} ++ ++// HTTPResendCount returns an attribute KeyValue conforming to the ++// "http.resend_count" semantic conventions. It represents the ordinal number ++// of request resending attempt (for any reason, including redirects). ++func HTTPResendCount(val int) attribute.KeyValue { ++ return HTTPResendCountKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Server ++const ( ++ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" ++ // semantic conventions. It represents the URI scheme identifying the used ++ // protocol. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'http', 'https' ++ HTTPSchemeKey = attribute.Key("http.scheme") ++ ++ // HTTPTargetKey is the attribute Key conforming to the "http.target" ++ // semantic conventions. It represents the full request target as passed in ++ // a HTTP request line or equivalent. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '/path/12314/?q=ddds' ++ HTTPTargetKey = attribute.Key("http.target") ++ ++ // HTTPRouteKey is the attribute Key conforming to the "http.route" ++ // semantic conventions. It represents the matched route (path template in ++ // the format used by the respective server framework). 
See note below ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's available) ++ // Stability: stable ++ // Examples: '/users/:userID?', '{controller}/{action}/{id?}' ++ // Note: 'http.route' MUST NOT be populated when this is not supported by ++ // the HTTP server framework as the route attribute should have ++ // low-cardinality and the URI path can NOT substitute it. ++ HTTPRouteKey = attribute.Key("http.route") ++ ++ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" ++ // semantic conventions. It represents the IP address of the original ++ // client behind all proxies, if known (e.g. from ++ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '83.164.160.102' ++ // Note: This is not necessarily the same as `net.sock.peer.addr`, which ++ // would ++ // identify the network-level peer, which may be a proxy. ++ // ++ // This attribute should be set when a source of information different ++ // from the one used for `net.sock.peer.addr`, is available even if that ++ // other ++ // source just confirms the same value as `net.sock.peer.addr`. ++ // Rationale: For `net.sock.peer.addr`, one typically does not know if it ++ // comes from a proxy, reverse proxy, or the actual client. Setting ++ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that ++ // one is at least somewhat confident that the address is not that of ++ // the closest proxy. ++ HTTPClientIPKey = attribute.Key("http.client_ip") ++) ++ ++// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" ++// semantic conventions. It represents the URI scheme identifying the used ++// protocol. ++func HTTPScheme(val string) attribute.KeyValue { ++ return HTTPSchemeKey.String(val) ++} ++ ++// HTTPTarget returns an attribute KeyValue conforming to the "http.target" ++// semantic conventions. It represents the full request target as passed in a ++// HTTP request line or equivalent. ++func HTTPTarget(val string) attribute.KeyValue { ++ return HTTPTargetKey.String(val) ++} ++ ++// HTTPRoute returns an attribute KeyValue conforming to the "http.route" ++// semantic conventions. It represents the matched route (path template in the ++// format used by the respective server framework). See note below ++func HTTPRoute(val string) attribute.KeyValue { ++ return HTTPRouteKey.String(val) ++} ++ ++// HTTPClientIP returns an attribute KeyValue conforming to the ++// "http.client_ip" semantic conventions. It represents the IP address of the ++// original client behind all proxies, if known (e.g. from ++// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). ++func HTTPClientIP(val string) attribute.KeyValue { ++ return HTTPClientIPKey.String(val) ++} ++ ++// Attributes that exist for multiple DynamoDB request types. ++const ( ++ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_names" semantic conventions. It represents the keys ++ // in the `RequestItems` object field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'Cats' ++ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") ++ ++ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the ++ // "aws.dynamodb.consumed_capacity" semantic conventions. 
It represents the ++ // JSON-serialized value of each item in the `ConsumedCapacity` response ++ // field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { ++ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number }, "TableName": "string", ++ // "WriteCapacityUnits": number }' ++ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") ++ ++ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to ++ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++ // represents the JSON-serialized value of the `ItemCollectionMetrics` ++ // response field. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": ++ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { ++ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], ++ // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, ++ // "SizeEstimateRangeGB": [ number ] } ] }' ++ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") ++ ++ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to ++ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It ++ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` ++ // request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") ++ ++ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming ++ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. ++ // It represents the value of the ++ // `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") ++ ++ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the ++ // "aws.dynamodb.consistent_read" semantic conventions. It represents the ++ // value of the `ConsistentRead` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") ++ ++ // AWSDynamoDBProjectionKey is the attribute Key conforming to the ++ // "aws.dynamodb.projection" semantic conventions. It represents the value ++ // of the `ProjectionExpression` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, ++ // RelatedItems, ProductReviews' ++ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") ++ ++ // AWSDynamoDBLimitKey is the attribute Key conforming to the ++ // "aws.dynamodb.limit" semantic conventions. It represents the value of ++ // the `Limit` request parameter. 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") ++ ++ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the ++ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++ // value of the `AttributesToGet` request parameter. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'lives', 'id' ++ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") ++ ++ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the ++ // "aws.dynamodb.index_name" semantic conventions. It represents the value ++ // of the `IndexName` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'name_to_group' ++ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") ++ ++ // AWSDynamoDBSelectKey is the attribute Key conforming to the ++ // "aws.dynamodb.select" semantic conventions. It represents the value of ++ // the `Select` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ALL_ATTRIBUTES', 'COUNT' ++ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ++) ++ ++// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_names" semantic conventions. It represents the keys in ++// the `RequestItems` object field. ++func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { ++ return AWSDynamoDBTableNamesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to ++// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++// JSON-serialized value of each item in the `ConsumedCapacity` response field. ++func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { ++ return AWSDynamoDBConsumedCapacityKey.StringSlice(val) ++} ++ ++// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming ++// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++// represents the JSON-serialized value of the `ItemCollectionMetrics` response ++// field. ++func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { ++ return AWSDynamoDBItemCollectionMetricsKey.String(val) ++} ++ ++// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.ReadCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the ++// "aws.dynamodb.consistent_read" semantic conventions. It represents the value ++// of the `ConsistentRead` request parameter. 
++func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { ++ return AWSDynamoDBConsistentReadKey.Bool(val) ++} ++ ++// AWSDynamoDBProjection returns an attribute KeyValue conforming to the ++// "aws.dynamodb.projection" semantic conventions. It represents the value of ++// the `ProjectionExpression` request parameter. ++func AWSDynamoDBProjection(val string) attribute.KeyValue { ++ return AWSDynamoDBProjectionKey.String(val) ++} ++ ++// AWSDynamoDBLimit returns an attribute KeyValue conforming to the ++// "aws.dynamodb.limit" semantic conventions. It represents the value of the ++// `Limit` request parameter. ++func AWSDynamoDBLimit(val int) attribute.KeyValue { ++ return AWSDynamoDBLimitKey.Int(val) ++} ++ ++// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to ++// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++// value of the `AttributesToGet` request parameter. ++func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributesToGetKey.StringSlice(val) ++} ++ ++// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the ++// "aws.dynamodb.index_name" semantic conventions. It represents the value of ++// the `IndexName` request parameter. ++func AWSDynamoDBIndexName(val string) attribute.KeyValue { ++ return AWSDynamoDBIndexNameKey.String(val) ++} ++ ++// AWSDynamoDBSelect returns an attribute KeyValue conforming to the ++// "aws.dynamodb.select" semantic conventions. It represents the value of the ++// `Select` request parameter. ++func AWSDynamoDBSelect(val string) attribute.KeyValue { ++ return AWSDynamoDBSelectKey.String(val) ++} ++ ++// DynamoDB.CreateTable ++const ( ++ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `GlobalSecondaryIndexes` request field ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": ++ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ ++ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { ++ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") ++ ++ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `LocalSecondaryIndexes` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexARN": "string", "IndexName": "string", ++ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' ++ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ++) ++ ++// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_indexes" semantic ++// conventions. 
It represents the JSON-serialized value of each item of the ++// `GlobalSecondaryIndexes` request field ++func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming ++// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++// represents the JSON-serialized value of each item of the ++// `LocalSecondaryIndexes` request field. ++func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// DynamoDB.ListTables ++const ( ++ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the ++ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents ++ // the value of the `ExclusiveStartTableName` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'CatsTable' ++ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") ++ ++ // AWSDynamoDBTableCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_count" semantic conventions. It represents the ++ // number of items in the `TableNames` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 20 ++ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ++) ++ ++// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming ++// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It ++// represents the value of the `ExclusiveStartTableName` request parameter. ++func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { ++ return AWSDynamoDBExclusiveStartTableKey.String(val) ++} ++ ++// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_count" semantic conventions. It represents the ++// number of items in the `TableNames` response parameter. ++func AWSDynamoDBTableCount(val int) attribute.KeyValue { ++ return AWSDynamoDBTableCountKey.Int(val) ++} ++ ++// DynamoDB.Query ++const ( ++ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the ++ // "aws.dynamodb.scan_forward" semantic conventions. It represents the ++ // value of the `ScanIndexForward` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ++) ++ ++// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of ++// the `ScanIndexForward` request parameter. ++func AWSDynamoDBScanForward(val bool) attribute.KeyValue { ++ return AWSDynamoDBScanForwardKey.Bool(val) ++} ++ ++// DynamoDB.Scan ++const ( ++ // AWSDynamoDBSegmentKey is the attribute Key conforming to the ++ // "aws.dynamodb.segment" semantic conventions. It represents the value of ++ // the `Segment` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") ++ ++ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the ++ // "aws.dynamodb.total_segments" semantic conventions. It represents the ++ // value of the `TotalSegments` request parameter.
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 100 ++ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") ++ ++ // AWSDynamoDBCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.count" semantic conventions. It represents the value of ++ // the `Count` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") ++ ++ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.scanned_count" semantic conventions. It represents the ++ // value of the `ScannedCount` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 50 ++ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ++) ++ ++// AWSDynamoDBSegment returns an attribute KeyValue conforming to the ++// "aws.dynamodb.segment" semantic conventions. It represents the value of the ++// `Segment` request parameter. ++func AWSDynamoDBSegment(val int) attribute.KeyValue { ++ return AWSDynamoDBSegmentKey.Int(val) ++} ++ ++// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the ++// "aws.dynamodb.total_segments" semantic conventions. It represents the value ++// of the `TotalSegments` request parameter. ++func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { ++ return AWSDynamoDBTotalSegmentsKey.Int(val) ++} ++ ++// AWSDynamoDBCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.count" semantic conventions. It represents the value of the ++// `Count` response parameter. ++func AWSDynamoDBCount(val int) attribute.KeyValue { ++ return AWSDynamoDBCountKey.Int(val) ++} ++ ++// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scanned_count" semantic conventions. It represents the value ++// of the `ScannedCount` response parameter. ++func AWSDynamoDBScannedCount(val int) attribute.KeyValue { ++ return AWSDynamoDBScannedCountKey.Int(val) ++} ++ ++// DynamoDB.UpdateTable ++const ( ++ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to ++ // the "aws.dynamodb.attribute_definitions" semantic conventions. It ++ // represents the JSON-serialized value of each item in the ++ // `AttributeDefinitions` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' ++ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") ++ ++ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key ++ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++ // conventions. It represents the JSON-serialized value of each item in the ++ // `GlobalSecondaryIndexUpdates` request field.
++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, ++ // "ProvisionedThroughput": { "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ++) ++ ++// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming ++// to the "aws.dynamodb.attribute_definitions" semantic conventions. It ++// represents the JSON-serialized value of each item in the ++// `AttributeDefinitions` request field. ++func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) ++} ++ ++// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++// conventions. It represents the JSON-serialized value of each item in the ++// `GlobalSecondaryIndexUpdates` request field. ++func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) ++} ++ ++// Semantic conventions to apply when instrumenting the GraphQL implementation. ++// They map GraphQL operations to attributes on a Span. ++const ( ++ // GraphqlOperationNameKey is the attribute Key conforming to the ++ // "graphql.operation.name" semantic conventions. It represents the name of ++ // the operation being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'findBookByID' ++ GraphqlOperationNameKey = attribute.Key("graphql.operation.name") ++ ++ // GraphqlOperationTypeKey is the attribute Key conforming to the ++ // "graphql.operation.type" semantic conventions. It represents the type of ++ // the operation being executed. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query', 'mutation', 'subscription' ++ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") ++ ++ // GraphqlDocumentKey is the attribute Key conforming to the ++ // "graphql.document" semantic conventions. It represents the GraphQL ++ // document being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query findBookByID { bookByID(id: ?) { name } }' ++ // Note: The value may be sanitized to exclude sensitive information. ++ GraphqlDocumentKey = attribute.Key("graphql.document") ++) ++ ++var ( ++ // GraphQL query ++ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") ++ // GraphQL mutation ++ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") ++ // GraphQL subscription ++ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ++) ++ ++// GraphqlOperationName returns an attribute KeyValue conforming to the ++// "graphql.operation.name" semantic conventions. It represents the name of the ++// operation being executed. ++func GraphqlOperationName(val string) attribute.KeyValue { ++ return GraphqlOperationNameKey.String(val) ++} ++ ++// GraphqlDocument returns an attribute KeyValue conforming to the ++// "graphql.document" semantic conventions. It represents the GraphQL document ++// being executed.
++func GraphqlDocument(val string) attribute.KeyValue { ++ return GraphqlDocumentKey.String(val) ++} ++ ++// Semantic convention describing per-message attributes populated on messaging ++// spans or links. ++const ( ++ // MessagingMessageIDKey is the attribute Key conforming to the ++ // "messaging.message.id" semantic conventions. It represents a value used ++ // by the messaging system as an identifier for the message, represented as ++ // a string. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '452a7c7c7c7048c2f887f61572b18fc2' ++ MessagingMessageIDKey = attribute.Key("messaging.message.id") ++ ++ // MessagingMessageConversationIDKey is the attribute Key conforming to the ++ // "messaging.message.conversation_id" semantic conventions. It represents ++ // the [conversation ID](#conversations) identifying the conversation to ++ // which the message belongs, represented as a string. Sometimes called ++ // "Correlation ID". ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyConversationID' ++ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") ++ ++ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to ++ // the "messaging.message.payload_size_bytes" semantic conventions. It ++ // represents the (uncompressed) size of the message payload in bytes. Also ++ // use this attribute if it is unknown whether the compressed or ++ // uncompressed payload size is reported. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2738 ++ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") ++ ++ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key ++ // conforming to the "messaging.message.payload_compressed_size_bytes" ++ // semantic conventions. It represents the compressed size of the message ++ // payload in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2048 ++ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ++) ++ ++// MessagingMessageID returns an attribute KeyValue conforming to the ++// "messaging.message.id" semantic conventions. It represents a value used by ++// the messaging system as an identifier for the message, represented as a ++// string. ++func MessagingMessageID(val string) attribute.KeyValue { ++ return MessagingMessageIDKey.String(val) ++} ++ ++// MessagingMessageConversationID returns an attribute KeyValue conforming ++// to the "messaging.message.conversation_id" semantic conventions. It ++// represents the [conversation ID](#conversations) identifying the ++// conversation to which the message belongs, represented as a string. ++// Sometimes called "Correlation ID". ++func MessagingMessageConversationID(val string) attribute.KeyValue { ++ return MessagingMessageConversationIDKey.String(val) ++} ++ ++// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming ++// to the "messaging.message.payload_size_bytes" semantic conventions. It ++// represents the (uncompressed) size of the message payload in bytes. Also use ++// this attribute if it is unknown whether the compressed or uncompressed ++// payload size is reported. 
++func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadSizeBytesKey.Int(val) ++} ++ ++// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue ++// conforming to the "messaging.message.payload_compressed_size_bytes" semantic ++// conventions. It represents the compressed size of the message payload in ++// bytes. ++func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) ++} ++ ++// Semantic convention for attributes that describe messaging destination on ++// broker ++const ( ++ // MessagingDestinationNameKey is the attribute Key conforming to the ++ // "messaging.destination.name" semantic conventions. It represents the ++ // message destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Destination name SHOULD uniquely identify a specific queue, topic ++ // or other entity within the broker. If ++ // the broker does not have such notion, the destination name SHOULD ++ // uniquely identify the broker. ++ MessagingDestinationNameKey = attribute.Key("messaging.destination.name") ++ ++ // MessagingDestinationKindKey is the attribute Key conforming to the ++ // "messaging.destination.kind" semantic conventions. It represents the ++ // kind of message destination ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") ++ ++ // MessagingDestinationTemplateKey is the attribute Key conforming to the ++ // "messaging.destination.template" semantic conventions. It represents the ++ // low cardinality representation of the messaging destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Destination names could be constructed from templates. An example ++ // would be a destination name involving a user name or product id. ++ // Although the destination name in this case is of high cardinality, the ++ // underlying template is of low cardinality and can be effectively used ++ // for grouping and aggregation. ++ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") ++ ++ // MessagingDestinationTemporaryKey is the attribute Key conforming to the ++ // "messaging.destination.temporary" semantic conventions. It represents a ++ // boolean that is true if the message destination is temporary and might ++ // not exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") ++ ++ // MessagingDestinationAnonymousKey is the attribute Key conforming to the ++ // "messaging.destination.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message destination is anonymous (could be ++ // unnamed or have auto-generated name). 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ++) ++ ++var ( ++ // A message sent to a queue ++ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") ++ // A message sent to a topic ++ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ++) ++ ++// MessagingDestinationName returns an attribute KeyValue conforming to the ++// "messaging.destination.name" semantic conventions. It represents the message ++// destination name ++func MessagingDestinationName(val string) attribute.KeyValue { ++ return MessagingDestinationNameKey.String(val) ++} ++ ++// MessagingDestinationTemplate returns an attribute KeyValue conforming to ++// the "messaging.destination.template" semantic conventions. It represents the ++// low cardinality representation of the messaging destination name ++func MessagingDestinationTemplate(val string) attribute.KeyValue { ++ return MessagingDestinationTemplateKey.String(val) ++} ++ ++// MessagingDestinationTemporary returns an attribute KeyValue conforming to ++// the "messaging.destination.temporary" semantic conventions. It represents a ++// boolean that is true if the message destination is temporary and might not ++// exist anymore after messages are processed. ++func MessagingDestinationTemporary(val bool) attribute.KeyValue { ++ return MessagingDestinationTemporaryKey.Bool(val) ++} ++ ++// MessagingDestinationAnonymous returns an attribute KeyValue conforming to ++// the "messaging.destination.anonymous" semantic conventions. It represents a ++// boolean that is true if the message destination is anonymous (could be ++// unnamed or have auto-generated name). ++func MessagingDestinationAnonymous(val bool) attribute.KeyValue { ++ return MessagingDestinationAnonymousKey.Bool(val) ++} ++ ++// Semantic convention for attributes that describe messaging source on broker ++const ( ++ // MessagingSourceNameKey is the attribute Key conforming to the ++ // "messaging.source.name" semantic conventions. It represents the message ++ // source name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Source name SHOULD uniquely identify a specific queue, topic, or ++ // other entity within the broker. If ++ // the broker does not have such notion, the source name SHOULD uniquely ++ // identify the broker. ++ MessagingSourceNameKey = attribute.Key("messaging.source.name") ++ ++ // MessagingSourceKindKey is the attribute Key conforming to the ++ // "messaging.source.kind" semantic conventions. It represents the kind of ++ // message source ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceKindKey = attribute.Key("messaging.source.kind") ++ ++ // MessagingSourceTemplateKey is the attribute Key conforming to the ++ // "messaging.source.template" semantic conventions. It represents the low ++ // cardinality representation of the messaging source name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Source names could be constructed from templates. An example would ++ // be a source name involving a user name or product id. Although the ++ // source name in this case is of high cardinality, the underlying template ++ // is of low cardinality and can be effectively used for grouping and ++ // aggregation. 
++ MessagingSourceTemplateKey = attribute.Key("messaging.source.template") ++ ++ // MessagingSourceTemporaryKey is the attribute Key conforming to the ++ // "messaging.source.temporary" semantic conventions. It represents a ++ // boolean that is true if the message source is temporary and might not ++ // exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") ++ ++ // MessagingSourceAnonymousKey is the attribute Key conforming to the ++ // "messaging.source.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message source is anonymous (could be ++ // unnamed or have auto-generated name). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ++) ++ ++var ( ++ // A message received from a queue ++ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") ++ // A message received from a topic ++ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ++) ++ ++// MessagingSourceName returns an attribute KeyValue conforming to the ++// "messaging.source.name" semantic conventions. It represents the message ++// source name ++func MessagingSourceName(val string) attribute.KeyValue { ++ return MessagingSourceNameKey.String(val) ++} ++ ++// MessagingSourceTemplate returns an attribute KeyValue conforming to the ++// "messaging.source.template" semantic conventions. It represents the low ++// cardinality representation of the messaging source name ++func MessagingSourceTemplate(val string) attribute.KeyValue { ++ return MessagingSourceTemplateKey.String(val) ++} ++ ++// MessagingSourceTemporary returns an attribute KeyValue conforming to the ++// "messaging.source.temporary" semantic conventions. It represents a boolean ++// that is true if the message source is temporary and might not exist anymore ++// after messages are processed. ++func MessagingSourceTemporary(val bool) attribute.KeyValue { ++ return MessagingSourceTemporaryKey.Bool(val) ++} ++ ++// MessagingSourceAnonymous returns an attribute KeyValue conforming to the ++// "messaging.source.anonymous" semantic conventions. It represents a boolean ++// that is true if the message source is anonymous (could be unnamed or have ++// auto-generated name). ++func MessagingSourceAnonymous(val bool) attribute.KeyValue { ++ return MessagingSourceAnonymousKey.Bool(val) ++} ++ ++// General attributes used in messaging systems. ++const ( ++ // MessagingSystemKey is the attribute Key conforming to the ++ // "messaging.system" semantic conventions. It represents a string ++ // identifying the messaging system. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' ++ MessagingSystemKey = attribute.Key("messaging.system") ++ ++ // MessagingOperationKey is the attribute Key conforming to the ++ // "messaging.operation" semantic conventions. It represents a string ++ // identifying the kind of messaging operation as defined in the [Operation ++ // names](#operation-names) section above. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: If a custom value is used, it MUST be of low cardinality. 
++ MessagingOperationKey = attribute.Key("messaging.operation") ++ ++ // MessagingBatchMessageCountKey is the attribute Key conforming to the ++ // "messaging.batch.message_count" semantic conventions. It represents the ++ // number of messages sent, received, or processed in the scope of the ++ // batching operation. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the span describes an ++ // operation on a batch of messages.) ++ // Stability: stable ++ // Examples: 0, 1, 2 ++ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on ++ // spans that operate with a single message. When a messaging client ++ // library supports both batch and single-message API for the same ++ // operation, instrumentations SHOULD use `messaging.batch.message_count` ++ // for batching APIs and SHOULD NOT use it for single-message APIs. ++ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ++) ++ ++var ( ++ // publish ++ MessagingOperationPublish = MessagingOperationKey.String("publish") ++ // receive ++ MessagingOperationReceive = MessagingOperationKey.String("receive") ++ // process ++ MessagingOperationProcess = MessagingOperationKey.String("process") ++) ++ ++// MessagingSystem returns an attribute KeyValue conforming to the ++// "messaging.system" semantic conventions. It represents a string identifying ++// the messaging system. ++func MessagingSystem(val string) attribute.KeyValue { ++ return MessagingSystemKey.String(val) ++} ++ ++// MessagingBatchMessageCount returns an attribute KeyValue conforming to ++// the "messaging.batch.message_count" semantic conventions. It represents the ++// number of messages sent, received, or processed in the scope of the batching ++// operation. ++func MessagingBatchMessageCount(val int) attribute.KeyValue { ++ return MessagingBatchMessageCountKey.Int(val) ++} ++ ++// Semantic convention for a consumer of messages received from a messaging ++// system ++const ( ++ // MessagingConsumerIDKey is the attribute Key conforming to the ++ // "messaging.consumer.id" semantic conventions. It represents the ++ // identifier for the consumer receiving a message. For Kafka, set it to ++ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if ++ // both are present, or only `messaging.kafka.consumer.group`. For brokers, ++ // such as RabbitMQ and Artemis, set it to the `client_id` of the client ++ // consuming the message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mygroup - client-6' ++ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ++) ++ ++// MessagingConsumerID returns an attribute KeyValue conforming to the ++// "messaging.consumer.id" semantic conventions. It represents the identifier ++// for the consumer receiving a message. For Kafka, set it to ++// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both ++// are present, or only `messaging.kafka.consumer.group`. For brokers, such as ++// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the ++// message. ++func MessagingConsumerID(val string) attribute.KeyValue { ++ return MessagingConsumerIDKey.String(val) ++} ++ ++// Attributes for RabbitMQ ++const ( ++ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key ++ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++ // conventions. It represents the rabbitMQ message routing key. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If not empty.) ++ // Stability: stable ++ // Examples: 'myKey' ++ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ++) ++ ++// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue ++// conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++// conventions. It represents the rabbitMQ message routing key. ++func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { ++ return MessagingRabbitmqDestinationRoutingKeyKey.String(val) ++} ++ ++// Attributes for Apache Kafka ++const ( ++ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the ++ // "messaging.kafka.message.key" semantic conventions. It represents the ++ // message keys in Kafka are used for grouping alike messages to ensure ++ // they're processed on the same partition. They differ from ++ // `messaging.message.id` in that they're not unique. If the key is `null`, ++ // the attribute MUST NOT be set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myKey' ++ // Note: If the key type is not string, its string representation has to ++ // be supplied for the attribute. If the key has no unambiguous, canonical ++ // string form, don't include its value. ++ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") ++ ++ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the ++ // "messaging.kafka.consumer.group" semantic conventions. It represents the ++ // name of the Kafka Consumer Group that is handling the message. Only ++ // applies to consumers, not producers. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-group' ++ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") ++ ++ // MessagingKafkaClientIDKey is the attribute Key conforming to the ++ // "messaging.kafka.client_id" semantic conventions. It represents the ++ // client ID for the Consumer or Producer that is handling the message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'client-5' ++ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") ++ ++ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to ++ // the "messaging.kafka.destination.partition" semantic conventions. It ++ // represents the partition the message is sent to. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") ++ ++ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the ++ // "messaging.kafka.source.partition" semantic conventions. It represents ++ // the partition the message is received from. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") ++ ++ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the ++ // "messaging.kafka.message.offset" semantic conventions. It represents the ++ // offset of a record in the corresponding Kafka partition.
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") ++ ++ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the ++ // "messaging.kafka.message.tombstone" semantic conventions. It represents ++ // a boolean that is true if the message is a tombstone. ++ // ++ // Type: boolean ++ // RequirementLevel: ConditionallyRequired (If value is `true`. When ++ // missing, the value is assumed to be `false`.) ++ // Stability: stable ++ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ++) ++ ++// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the ++// "messaging.kafka.message.key" semantic conventions. It represents the ++// message keys in Kafka are used for grouping alike messages to ensure they're ++// processed on the same partition. They differ from `messaging.message.id` in ++// that they're not unique. If the key is `null`, the attribute MUST NOT be ++// set. ++func MessagingKafkaMessageKey(val string) attribute.KeyValue { ++ return MessagingKafkaMessageKeyKey.String(val) ++} ++ ++// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to ++// the "messaging.kafka.consumer.group" semantic conventions. It represents the ++// name of the Kafka Consumer Group that is handling the message. Only applies ++// to consumers, not producers. ++func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { ++ return MessagingKafkaConsumerGroupKey.String(val) ++} ++ ++// MessagingKafkaClientID returns an attribute KeyValue conforming to the ++// "messaging.kafka.client_id" semantic conventions. It represents the client ++// ID for the Consumer or Producer that is handling the message. ++func MessagingKafkaClientID(val string) attribute.KeyValue { ++ return MessagingKafkaClientIDKey.String(val) ++} ++ ++// MessagingKafkaDestinationPartition returns an attribute KeyValue ++// conforming to the "messaging.kafka.destination.partition" semantic ++// conventions. It represents the partition the message is sent to. ++func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { ++ return MessagingKafkaDestinationPartitionKey.Int(val) ++} ++ ++// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to ++// the "messaging.kafka.source.partition" semantic conventions. It represents ++// the partition the message is received from. ++func MessagingKafkaSourcePartition(val int) attribute.KeyValue { ++ return MessagingKafkaSourcePartitionKey.Int(val) ++} ++ ++// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to ++// the "messaging.kafka.message.offset" semantic conventions. It represents the ++// offset of a record in the corresponding Kafka partition. ++func MessagingKafkaMessageOffset(val int) attribute.KeyValue { ++ return MessagingKafkaMessageOffsetKey.Int(val) ++} ++ ++// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming ++// to the "messaging.kafka.message.tombstone" semantic conventions. It ++// represents a boolean that is true if the message is a tombstone. ++func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { ++ return MessagingKafkaMessageTombstoneKey.Bool(val) ++} ++ ++// Attributes for Apache RocketMQ ++const ( ++ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the ++ // "messaging.rocketmq.namespace" semantic conventions. 
It represents the ++ // namespace of RocketMQ resources, resources in different namespaces are ++ // individual. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myNamespace' ++ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") ++ ++ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_group" semantic conventions. It represents ++ // the name of the RocketMQ producer/consumer group that is handling the ++ // message. The client type is identified by the SpanKind. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myConsumerGroup' ++ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") ++ ++ // MessagingRocketmqClientIDKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_id" semantic conventions. It represents the ++ // unique identifier for each client. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myhost@8742@s8083jm' ++ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") ++ ++ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delivery_timestamp" ++ // semantic conventions. It represents the timestamp in milliseconds that ++ // the delay message is expected to be delivered to consumer. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delay time level is not specified.) ++ // Stability: stable ++ // Examples: 1665987217045 ++ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") ++ ++ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++ // conventions. It represents the delay time level for delay message, which ++ // determines the message delay time. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delivery timestamp is not specified.) ++ // Stability: stable ++ // Examples: 3 ++ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") ++ ++ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.group" semantic conventions. It represents ++ // the it is essential for FIFO message. Messages that belong to the same ++ // message group are always processed one by one within the same consumer ++ // group. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) ++ // Stability: stable ++ // Examples: 'myMessageGroup' ++ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") ++ ++ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.type" semantic conventions. It represents ++ // the type of message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") ++ ++ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.tag" semantic conventions. It represents the ++ // secondary classifier of message besides topic. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tagA' ++ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") ++ ++ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.keys" semantic conventions. It represents ++ // the key(s) of message, another way to mark message besides message id. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'keyA', 'keyB' ++ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") ++ ++ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to ++ // the "messaging.rocketmq.consumption_model" semantic conventions. It ++ // represents the model of message consumption. This only applies to ++ // consumer spans. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ++) ++ ++var ( ++ // Normal message ++ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") ++ // FIFO message ++ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") ++ // Delay message ++ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") ++ // Transaction message ++ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ++) ++ ++var ( ++ // Clustering consumption model ++ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") ++ // Broadcasting consumption model ++ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ++) ++ ++// MessagingRocketmqNamespace returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.namespace" semantic conventions. It represents the ++// namespace of RocketMQ resources, resources in different namespaces are ++// individual. ++func MessagingRocketmqNamespace(val string) attribute.KeyValue { ++ return MessagingRocketmqNamespaceKey.String(val) ++} ++ ++// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.client_group" semantic conventions. It represents ++// the name of the RocketMQ producer/consumer group that is handling the ++// message. The client type is identified by the SpanKind. ++func MessagingRocketmqClientGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqClientGroupKey.String(val) ++} ++ ++// MessagingRocketmqClientID returns an attribute KeyValue conforming to the ++// "messaging.rocketmq.client_id" semantic conventions. It represents the ++// unique identifier for each client. ++func MessagingRocketmqClientID(val string) attribute.KeyValue { ++ return MessagingRocketmqClientIDKey.String(val) ++} ++ ++// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic ++// conventions. It represents the timestamp in milliseconds that the delay ++// message is expected to be delivered to consumer. ++func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) ++} ++ ++// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++// conventions. 
It represents the delay time level for delay message, which ++// determines the message delay time. ++func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) ++} ++ ++// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.group" semantic conventions. It represents ++// the it is essential for FIFO message. Messages that belong to the same ++// message group are always processed one by one within the same consumer ++// group. ++func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.tag" semantic conventions. It represents the ++// secondary classifier of message besides topic. ++func MessagingRocketmqMessageTag(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageTagKey.String(val) ++} ++ ++// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.keys" semantic conventions. It represents ++// the key(s) of message, another way to mark message besides message id. ++func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { ++ return MessagingRocketmqMessageKeysKey.StringSlice(val) ++} ++ ++// Semantic conventions for remote procedure calls. ++const ( ++ // RPCSystemKey is the attribute Key conforming to the "rpc.system" ++ // semantic conventions. It represents a string identifying the remoting ++ // system. See below for a list of well-known identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCSystemKey = attribute.Key("rpc.system") ++ ++ // RPCServiceKey is the attribute Key conforming to the "rpc.service" ++ // semantic conventions. It represents the full (logical) name of the ++ // service being called, including its package name, if applicable. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'myservice.EchoService' ++ // Note: This is the logical name of the service from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // class. The `code.namespace` attribute may be used to store the latter ++ // (despite the attribute name, it may include a class name; e.g., class ++ // with method actually executing the call on the server side, RPC client ++ // stub class on the client side). ++ RPCServiceKey = attribute.Key("rpc.service") ++ ++ // RPCMethodKey is the attribute Key conforming to the "rpc.method" ++ // semantic conventions. It represents the name of the (logical) method ++ // being called, must be equal to the $method part in the span name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'exampleMethod' ++ // Note: This is the logical name of the method from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // method/function. The `code.function` attribute may be used to store the ++ // latter (e.g., method actually executing the call on the server side, RPC ++ // client stub method on the client side). 
++ RPCMethodKey = attribute.Key("rpc.method") ++) ++ ++var ( ++ // gRPC ++ RPCSystemGRPC = RPCSystemKey.String("grpc") ++ // Java RMI ++ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") ++ // .NET WCF ++ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") ++ // Apache Dubbo ++ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ++) ++ ++// RPCService returns an attribute KeyValue conforming to the "rpc.service" ++// semantic conventions. It represents the full (logical) name of the service ++// being called, including its package name, if applicable. ++func RPCService(val string) attribute.KeyValue { ++ return RPCServiceKey.String(val) ++} ++ ++// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" ++// semantic conventions. It represents the name of the (logical) method being ++// called, must be equal to the $method part in the span name. ++func RPCMethod(val string) attribute.KeyValue { ++ return RPCMethodKey.String(val) ++} ++ ++// Tech-specific attributes for gRPC. ++const ( ++ // RPCGRPCStatusCodeKey is the attribute Key conforming to the ++ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric ++ // status ++ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of ++ // the gRPC request. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++var ( ++ // OK ++ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) ++ // CANCELLED ++ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) ++ // UNKNOWN ++ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) ++ // INVALID_ARGUMENT ++ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) ++ // DEADLINE_EXCEEDED ++ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) ++ // NOT_FOUND ++ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) ++ // ALREADY_EXISTS ++ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) ++ // PERMISSION_DENIED ++ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) ++ // RESOURCE_EXHAUSTED ++ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) ++ // FAILED_PRECONDITION ++ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) ++ // ABORTED ++ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) ++ // OUT_OF_RANGE ++ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) ++ // UNIMPLEMENTED ++ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) ++ // INTERNAL ++ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) ++ // UNAVAILABLE ++ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) ++ // DATA_LOSS ++ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) ++ // UNAUTHENTICATED ++ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ++) ++ ++// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). ++const ( ++ // RPCJsonrpcVersionKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++ // does not specify this, the value can be omitted. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // version (`1.0`)) ++ // Stability: stable ++ // Examples: '2.0', '1.0' ++ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") ++ ++ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++ // property of request or response. Since protocol allows id to be int, ++ // string, `null` or missing (for notifications), value is expected to be ++ // cast to string for simplicity. Use empty string in case of `null` value. ++ // Omit entirely if this is a notification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10', 'request-7', '' ++ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") ++ ++ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_code" semantic conventions. It represents the ++ // `error.code` property of response if it is an error response. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If response is not successful.) ++ // Stability: stable ++ // Examples: -32700, 100 ++ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") ++ ++ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_message" semantic conventions. It represents the ++ // `error.message` property of response if it is an error response. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Parse error', 'User already exists' ++ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ++) ++ ++// RPCJsonrpcVersion returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++// does not specify this, the value can be omitted. ++func RPCJsonrpcVersion(val string) attribute.KeyValue { ++ return RPCJsonrpcVersionKey.String(val) ++} ++ ++// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++// property of request or response. Since protocol allows id to be int, string, ++// `null` or missing (for notifications), value is expected to be cast to ++// string for simplicity. Use empty string in case of `null` value. Omit ++// entirely if this is a notification. ++func RPCJsonrpcRequestID(val string) attribute.KeyValue { ++ return RPCJsonrpcRequestIDKey.String(val) ++} ++ ++// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_code" semantic conventions. It represents the ++// `error.code` property of response if it is an error response. ++func RPCJsonrpcErrorCode(val int) attribute.KeyValue { ++ return RPCJsonrpcErrorCodeKey.Int(val) ++} ++ ++// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_message" semantic conventions. It represents the ++// `error.message` property of response if it is an error response. 
++func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { ++ return RPCJsonrpcErrorMessageKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go +new file mode 100644 +index 00000000000..e6cf8951053 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go +@@ -0,0 +1,1877 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// These attributes may be used to describe the client in a connection-based ++// network interaction where there is one side that initiates the connection ++// (the client is the side that initiates the connection). This covers all TCP ++// network interactions since TCP is connection-based and one side initiates ++// the connection (an exception is made for peer-to-peer communication over TCP ++// where the "user-facing" surface of the protocol / API does not expose a ++// clear notion of client and server). This also covers UDP network ++// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) ++// and DNS. ++const ( ++ // ClientAddressKey is the attribute Key conforming to the "client.address" ++ // semantic conventions. It represents the client address - unix domain ++ // socket name, IPv4 or IPv6 address. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/tmp/my.sock', '10.1.2.80' ++ // Note: When observed from the server side, and when communicating through ++ // an intermediary, `client.address` SHOULD represent client address behind ++ // any intermediaries (e.g. proxies) if it's available. ++ ClientAddressKey = attribute.Key("client.address") ++ ++ // ClientPortKey is the attribute Key conforming to the "client.port" ++ // semantic conventions. It represents the client port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 65123 ++ // Note: When observed from the server side, and when communicating through ++ // an intermediary, `client.port` SHOULD represent client port behind any ++ // intermediaries (e.g. proxies) if it's available. ++ ClientPortKey = attribute.Key("client.port") ++ ++ // ClientSocketAddressKey is the attribute Key conforming to the ++ // "client.socket.address" semantic conventions. It represents the ++ // immediate client peer address - unix domain socket name, IPv4 or IPv6 ++ // address. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `client.address`.) 
++ // Stability: stable ++ // Examples: '/tmp/my.sock', '127.0.0.1' ++ ClientSocketAddressKey = attribute.Key("client.socket.address") ++ ++ // ClientSocketPortKey is the attribute Key conforming to the ++ // "client.socket.port" semantic conventions. It represents the immediate ++ // client peer port number ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If different than `client.port`.) ++ // Stability: stable ++ // Examples: 35555 ++ ClientSocketPortKey = attribute.Key("client.socket.port") ++) ++ ++// ClientAddress returns an attribute KeyValue conforming to the ++// "client.address" semantic conventions. It represents the client address - ++// unix domain socket name, IPv4 or IPv6 address. ++func ClientAddress(val string) attribute.KeyValue { ++ return ClientAddressKey.String(val) ++} ++ ++// ClientPort returns an attribute KeyValue conforming to the "client.port" ++// semantic conventions. It represents the client port number ++func ClientPort(val int) attribute.KeyValue { ++ return ClientPortKey.Int(val) ++} ++ ++// ClientSocketAddress returns an attribute KeyValue conforming to the ++// "client.socket.address" semantic conventions. It represents the immediate ++// client peer address - unix domain socket name, IPv4 or IPv6 address. ++func ClientSocketAddress(val string) attribute.KeyValue { ++ return ClientSocketAddressKey.String(val) ++} ++ ++// ClientSocketPort returns an attribute KeyValue conforming to the ++// "client.socket.port" semantic conventions. It represents the immediate ++// client peer port number ++func ClientSocketPort(val int) attribute.KeyValue { ++ return ClientSocketPortKey.Int(val) ++} ++ ++// Describes deprecated HTTP attributes. ++const ( ++ // HTTPMethodKey is the attribute Key conforming to the "http.method" ++ // semantic conventions. It represents the deprecated, use ++ // `http.request.method` instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'GET', 'POST', 'HEAD' ++ HTTPMethodKey = attribute.Key("http.method") ++ ++ // HTTPStatusCodeKey is the attribute Key conforming to the ++ // "http.status_code" semantic conventions. It represents the deprecated, ++ // use `http.response.status_code` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 200 ++ HTTPStatusCodeKey = attribute.Key("http.status_code") ++ ++ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" ++ // semantic conventions. It represents the deprecated, use `url.scheme` ++ // instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'http', 'https' ++ HTTPSchemeKey = attribute.Key("http.scheme") ++ ++ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic ++ // conventions. It represents the deprecated, use `url.full` instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' ++ HTTPURLKey = attribute.Key("http.url") ++ ++ // HTTPTargetKey is the attribute Key conforming to the "http.target" ++ // semantic conventions. It represents the deprecated, use `url.path` and ++ // `url.query` instead. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/search?q=OpenTelemetry#SemConv' ++ HTTPTargetKey = attribute.Key("http.target") ++ ++ // HTTPRequestContentLengthKey is the attribute Key conforming to the ++ // "http.request_content_length" semantic conventions. It represents the ++ // deprecated, use `http.request.body.size` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 3495 ++ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") ++ ++ // HTTPResponseContentLengthKey is the attribute Key conforming to the ++ // "http.response_content_length" semantic conventions. It represents the ++ // deprecated, use `http.response.body.size` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 3495 ++ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ++) ++ ++// HTTPMethod returns an attribute KeyValue conforming to the "http.method" ++// semantic conventions. It represents the deprecated, use ++// `http.request.method` instead. ++func HTTPMethod(val string) attribute.KeyValue { ++ return HTTPMethodKey.String(val) ++} ++ ++// HTTPStatusCode returns an attribute KeyValue conforming to the ++// "http.status_code" semantic conventions. It represents the deprecated, use ++// `http.response.status_code` instead. ++func HTTPStatusCode(val int) attribute.KeyValue { ++ return HTTPStatusCodeKey.Int(val) ++} ++ ++// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" ++// semantic conventions. It represents the deprecated, use `url.scheme` ++// instead. ++func HTTPScheme(val string) attribute.KeyValue { ++ return HTTPSchemeKey.String(val) ++} ++ ++// HTTPURL returns an attribute KeyValue conforming to the "http.url" ++// semantic conventions. It represents the deprecated, use `url.full` instead. ++func HTTPURL(val string) attribute.KeyValue { ++ return HTTPURLKey.String(val) ++} ++ ++// HTTPTarget returns an attribute KeyValue conforming to the "http.target" ++// semantic conventions. It represents the deprecated, use `url.path` and ++// `url.query` instead. ++func HTTPTarget(val string) attribute.KeyValue { ++ return HTTPTargetKey.String(val) ++} ++ ++// HTTPRequestContentLength returns an attribute KeyValue conforming to the ++// "http.request_content_length" semantic conventions. It represents the ++// deprecated, use `http.request.body.size` instead. ++func HTTPRequestContentLength(val int) attribute.KeyValue { ++ return HTTPRequestContentLengthKey.Int(val) ++} ++ ++// HTTPResponseContentLength returns an attribute KeyValue conforming to the ++// "http.response_content_length" semantic conventions. It represents the ++// deprecated, use `http.response.body.size` instead. ++func HTTPResponseContentLength(val int) attribute.KeyValue { ++ return HTTPResponseContentLengthKey.Int(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetSockPeerNameKey is the attribute Key conforming to the ++ // "net.sock.peer.name" semantic conventions. It represents the deprecated, ++ // use `server.socket.domain` on client spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/var/my.sock' ++ NetSockPeerNameKey = attribute.Key("net.sock.peer.name") ++ ++ // NetSockPeerAddrKey is the attribute Key conforming to the ++ // "net.sock.peer.addr" semantic conventions. 
It represents the deprecated, ++ // use `server.socket.address` on client spans and `client.socket.address` ++ // on server spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '192.168.0.1' ++ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") ++ ++ // NetSockPeerPortKey is the attribute Key conforming to the ++ // "net.sock.peer.port" semantic conventions. It represents the deprecated, ++ // use `server.socket.port` on client spans and `client.socket.port` on ++ // server spans. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 65531 ++ NetSockPeerPortKey = attribute.Key("net.sock.peer.port") ++ ++ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" ++ // semantic conventions. It represents the deprecated, use `server.address` ++ // on client spans and `client.address` on server spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'example.com' ++ NetPeerNameKey = attribute.Key("net.peer.name") ++ ++ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" ++ // semantic conventions. It represents the deprecated, use `server.port` on ++ // client spans and `client.port` on server spans. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetPeerPortKey = attribute.Key("net.peer.port") ++ ++ // NetHostNameKey is the attribute Key conforming to the "net.host.name" ++ // semantic conventions. It represents the deprecated, use ++ // `server.address`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'example.com' ++ NetHostNameKey = attribute.Key("net.host.name") ++ ++ // NetHostPortKey is the attribute Key conforming to the "net.host.port" ++ // semantic conventions. It represents the deprecated, use `server.port`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetHostPortKey = attribute.Key("net.host.port") ++ ++ // NetSockHostAddrKey is the attribute Key conforming to the ++ // "net.sock.host.addr" semantic conventions. It represents the deprecated, ++ // use `server.socket.address`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/var/my.sock' ++ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") ++ ++ // NetSockHostPortKey is the attribute Key conforming to the ++ // "net.sock.host.port" semantic conventions. It represents the deprecated, ++ // use `server.socket.port`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetSockHostPortKey = attribute.Key("net.sock.host.port") ++ ++ // NetTransportKey is the attribute Key conforming to the "net.transport" ++ // semantic conventions. It represents the deprecated, use ++ // `network.transport`. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ NetTransportKey = attribute.Key("net.transport") ++ ++ // NetProtocolNameKey is the attribute Key conforming to the ++ // "net.protocol.name" semantic conventions. It represents the deprecated, ++ // use `network.protocol.name`. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetProtocolNameKey = attribute.Key("net.protocol.name") ++ ++ // NetProtocolVersionKey is the attribute Key conforming to the ++ // "net.protocol.version" semantic conventions. It represents the ++ // deprecated, use `network.protocol.version`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '3.1.1' ++ NetProtocolVersionKey = attribute.Key("net.protocol.version") ++ ++ // NetSockFamilyKey is the attribute Key conforming to the ++ // "net.sock.family" semantic conventions. It represents the deprecated, ++ // use `network.transport` and `network.type`. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ NetSockFamilyKey = attribute.Key("net.sock.family") ++) ++ ++var ( ++ // ip_tcp ++ NetTransportTCP = NetTransportKey.String("ip_tcp") ++ // ip_udp ++ NetTransportUDP = NetTransportKey.String("ip_udp") ++ // Named or anonymous pipe ++ NetTransportPipe = NetTransportKey.String("pipe") ++ // In-process communication ++ NetTransportInProc = NetTransportKey.String("inproc") ++ // Something else (non IP-based) ++ NetTransportOther = NetTransportKey.String("other") ++) ++ ++var ( ++ // IPv4 address ++ NetSockFamilyInet = NetSockFamilyKey.String("inet") ++ // IPv6 address ++ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") ++ // Unix domain socket path ++ NetSockFamilyUnix = NetSockFamilyKey.String("unix") ++) ++ ++// NetSockPeerName returns an attribute KeyValue conforming to the ++// "net.sock.peer.name" semantic conventions. It represents the deprecated, use ++// `server.socket.domain` on client spans. ++func NetSockPeerName(val string) attribute.KeyValue { ++ return NetSockPeerNameKey.String(val) ++} ++ ++// NetSockPeerAddr returns an attribute KeyValue conforming to the ++// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use ++// `server.socket.address` on client spans and `client.socket.address` on ++// server spans. ++func NetSockPeerAddr(val string) attribute.KeyValue { ++ return NetSockPeerAddrKey.String(val) ++} ++ ++// NetSockPeerPort returns an attribute KeyValue conforming to the ++// "net.sock.peer.port" semantic conventions. It represents the deprecated, use ++// `server.socket.port` on client spans and `client.socket.port` on server ++// spans. ++func NetSockPeerPort(val int) attribute.KeyValue { ++ return NetSockPeerPortKey.Int(val) ++} ++ ++// NetPeerName returns an attribute KeyValue conforming to the ++// "net.peer.name" semantic conventions. It represents the deprecated, use ++// `server.address` on client spans and `client.address` on server spans. ++func NetPeerName(val string) attribute.KeyValue { ++ return NetPeerNameKey.String(val) ++} ++ ++// NetPeerPort returns an attribute KeyValue conforming to the ++// "net.peer.port" semantic conventions. It represents the deprecated, use ++// `server.port` on client spans and `client.port` on server spans. ++func NetPeerPort(val int) attribute.KeyValue { ++ return NetPeerPortKey.Int(val) ++} ++ ++// NetHostName returns an attribute KeyValue conforming to the ++// "net.host.name" semantic conventions. It represents the deprecated, use ++// `server.address`. ++func NetHostName(val string) attribute.KeyValue { ++ return NetHostNameKey.String(val) ++} ++ ++// NetHostPort returns an attribute KeyValue conforming to the ++// "net.host.port" semantic conventions. 
It represents the deprecated, use ++// `server.port`. ++func NetHostPort(val int) attribute.KeyValue { ++ return NetHostPortKey.Int(val) ++} ++ ++// NetSockHostAddr returns an attribute KeyValue conforming to the ++// "net.sock.host.addr" semantic conventions. It represents the deprecated, use ++// `server.socket.address`. ++func NetSockHostAddr(val string) attribute.KeyValue { ++ return NetSockHostAddrKey.String(val) ++} ++ ++// NetSockHostPort returns an attribute KeyValue conforming to the ++// "net.sock.host.port" semantic conventions. It represents the deprecated, use ++// `server.socket.port`. ++func NetSockHostPort(val int) attribute.KeyValue { ++ return NetSockHostPortKey.Int(val) ++} ++ ++// NetProtocolName returns an attribute KeyValue conforming to the ++// "net.protocol.name" semantic conventions. It represents the deprecated, use ++// `network.protocol.name`. ++func NetProtocolName(val string) attribute.KeyValue { ++ return NetProtocolNameKey.String(val) ++} ++ ++// NetProtocolVersion returns an attribute KeyValue conforming to the ++// "net.protocol.version" semantic conventions. It represents the deprecated, ++// use `network.protocol.version`. ++func NetProtocolVersion(val string) attribute.KeyValue { ++ return NetProtocolVersionKey.String(val) ++} ++ ++// These attributes may be used to describe the receiver of a network ++// exchange/packet. These should be used when there is no client/server ++// relationship between the two sides, or when that relationship is unknown. ++// This covers low-level network interactions (e.g. packet tracing) where you ++// don't know if there was a connection or which side initiated it. This also ++// covers unidirectional UDP flows and peer-to-peer communication where the ++// "user-facing" surface of the protocol / API does not expose a clear notion ++// of client and server. ++const ( ++ // DestinationDomainKey is the attribute Key conforming to the ++ // "destination.domain" semantic conventions. It represents the domain name ++ // of the destination system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'foo.example.com' ++ // Note: This value may be a host name, a fully qualified domain name, or ++ // another host naming format. ++ DestinationDomainKey = attribute.Key("destination.domain") ++ ++ // DestinationAddressKey is the attribute Key conforming to the ++ // "destination.address" semantic conventions. It represents the peer ++ // address, for example IP address or UNIX socket name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ DestinationAddressKey = attribute.Key("destination.address") ++ ++ // DestinationPortKey is the attribute Key conforming to the ++ // "destination.port" semantic conventions. It represents the peer port ++ // number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3389, 2888 ++ DestinationPortKey = attribute.Key("destination.port") ++) ++ ++// DestinationDomain returns an attribute KeyValue conforming to the ++// "destination.domain" semantic conventions. It represents the domain name of ++// the destination system. ++func DestinationDomain(val string) attribute.KeyValue { ++ return DestinationDomainKey.String(val) ++} ++ ++// DestinationAddress returns an attribute KeyValue conforming to the ++// "destination.address" semantic conventions. It represents the peer address, ++// for example IP address or UNIX socket name. 
++func DestinationAddress(val string) attribute.KeyValue { ++ return DestinationAddressKey.String(val) ++} ++ ++// DestinationPort returns an attribute KeyValue conforming to the ++// "destination.port" semantic conventions. It represents the peer port number ++func DestinationPort(val int) attribute.KeyValue { ++ return DestinationPortKey.Int(val) ++} ++ ++// Describes HTTP attributes. ++const ( ++ // HTTPRequestMethodKey is the attribute Key conforming to the ++ // "http.request.method" semantic conventions. It represents the hTTP ++ // request method. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'GET', 'POST', 'HEAD' ++ // Note: HTTP request method value SHOULD be "known" to the ++ // instrumentation. ++ // By default, this convention defines "known" methods as the ones listed ++ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) ++ // and the PATCH method defined in ++ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). ++ // ++ // If the HTTP request method is not known to instrumentation, it MUST set ++ // the `http.request.method` attribute to `_OTHER` and, except if reporting ++ // a metric, MUST ++ // set the exact method received in the request line as value of the ++ // `http.request.method_original` attribute. ++ // ++ // If the HTTP instrumentation could end up converting valid HTTP request ++ // methods to `_OTHER`, then it MUST provide a way to override ++ // the list of known HTTP methods. If this override is done via environment ++ // variable, then the environment variable MUST be named ++ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated ++ // list of case-sensitive known HTTP methods ++ // (this list MUST be a full override of the default known method, it is ++ // not a list of known methods in addition to the defaults). ++ // ++ // HTTP method names are case-sensitive and `http.request.method` attribute ++ // value MUST match a known HTTP method name exactly. ++ // Instrumentations for specific web frameworks that consider HTTP methods ++ // to be case insensitive, SHOULD populate a canonical equivalent. ++ // Tracing instrumentations that do so, MUST also set ++ // `http.request.method_original` to the original value. ++ HTTPRequestMethodKey = attribute.Key("http.request.method") ++ ++ // HTTPResponseStatusCodeKey is the attribute Key conforming to the ++ // "http.response.status_code" semantic conventions. It represents the ++ // [HTTP response status ++ // code](https://tools.ietf.org/html/rfc7231#section-6). ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If and only if one was ++ // received/sent.) 
++ // Stability: stable ++ // Examples: 200 ++ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") ++) ++ ++var ( ++ // CONNECT method ++ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") ++ // DELETE method ++ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") ++ // GET method ++ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") ++ // HEAD method ++ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") ++ // OPTIONS method ++ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") ++ // PATCH method ++ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") ++ // POST method ++ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") ++ // PUT method ++ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") ++ // TRACE method ++ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") ++ // Any HTTP method that the instrumentation has no prior knowledge of ++ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") ++) ++ ++// HTTPResponseStatusCode returns an attribute KeyValue conforming to the ++// "http.response.status_code" semantic conventions. It represents the [HTTP ++// response status code](https://tools.ietf.org/html/rfc7231#section-6). ++func HTTPResponseStatusCode(val int) attribute.KeyValue { ++ return HTTPResponseStatusCodeKey.Int(val) ++} ++ ++// HTTP Server attributes ++const ( ++ // HTTPRouteKey is the attribute Key conforming to the "http.route" ++ // semantic conventions. It represents the matched route (path template in ++ // the format used by the respective server framework). See note below ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's available) ++ // Stability: stable ++ // Examples: '/users/:userID?', '{controller}/{action}/{id?}' ++ // Note: MUST NOT be populated when this is not supported by the HTTP ++ // server framework as the route attribute should have low-cardinality and ++ // the URI path can NOT substitute it. ++ // SHOULD include the [application ++ // root](/docs/http/http-spans.md#http-server-definitions) if there is one. ++ HTTPRouteKey = attribute.Key("http.route") ++) ++ ++// HTTPRoute returns an attribute KeyValue conforming to the "http.route" ++// semantic conventions. It represents the matched route (path template in the ++// format used by the respective server framework). See note below ++func HTTPRoute(val string) attribute.KeyValue { ++ return HTTPRouteKey.String(val) ++} ++ ++// Attributes for Events represented using Log Records. ++const ( ++ // EventNameKey is the attribute Key conforming to the "event.name" ++ // semantic conventions. It represents the name identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'click', 'exception' ++ EventNameKey = attribute.Key("event.name") ++ ++ // EventDomainKey is the attribute Key conforming to the "event.domain" ++ // semantic conventions. It represents the domain identifies the business ++ // context for the events. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: Events across different domains may have same `event.name`, yet be ++ // unrelated events. 
++ EventDomainKey = attribute.Key("event.domain") ++) ++ ++var ( ++ // Events from browser apps ++ EventDomainBrowser = EventDomainKey.String("browser") ++ // Events from mobile apps ++ EventDomainDevice = EventDomainKey.String("device") ++ // Events from Kubernetes ++ EventDomainK8S = EventDomainKey.String("k8s") ++) ++ ++// EventName returns an attribute KeyValue conforming to the "event.name" ++// semantic conventions. It represents the name identifies the event. ++func EventName(val string) attribute.KeyValue { ++ return EventNameKey.String(val) ++} ++ ++// The attributes described in this section are rather generic. They may be ++// used in any Log Record they apply to. ++const ( ++ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" ++ // semantic conventions. It represents a unique identifier for the Log ++ // Record. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' ++ // Note: If an id is provided, other log records with the same id will be ++ // considered duplicates and can be removed safely. This means, that two ++ // distinguishable log records MUST have different values. ++ // The id MAY be an [Universally Unique Lexicographically Sortable ++ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers ++ // (e.g. UUID) may be used as needed. ++ LogRecordUIDKey = attribute.Key("log.record.uid") ++) ++ ++// LogRecordUID returns an attribute KeyValue conforming to the ++// "log.record.uid" semantic conventions. It represents a unique identifier for ++// the Log Record. ++func LogRecordUID(val string) attribute.KeyValue { ++ return LogRecordUIDKey.String(val) ++} ++ ++// Describes Log attributes ++const ( ++ // LogIostreamKey is the attribute Key conforming to the "log.iostream" ++ // semantic conventions. It represents the stream associated with the log. ++ // See below for a list of well-known values. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ LogIostreamKey = attribute.Key("log.iostream") ++) ++ ++var ( ++ // Logs from stdout stream ++ LogIostreamStdout = LogIostreamKey.String("stdout") ++ // Events from stderr stream ++ LogIostreamStderr = LogIostreamKey.String("stderr") ++) ++ ++// A file to which log was emitted. ++const ( ++ // LogFileNameKey is the attribute Key conforming to the "log.file.name" ++ // semantic conventions. It represents the basename of the file. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'audit.log' ++ LogFileNameKey = attribute.Key("log.file.name") ++ ++ // LogFilePathKey is the attribute Key conforming to the "log.file.path" ++ // semantic conventions. It represents the full path to the file. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/var/log/mysql/audit.log' ++ LogFilePathKey = attribute.Key("log.file.path") ++ ++ // LogFileNameResolvedKey is the attribute Key conforming to the ++ // "log.file.name_resolved" semantic conventions. It represents the ++ // basename of the file, with symlinks resolved. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'uuid.log' ++ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") ++ ++ // LogFilePathResolvedKey is the attribute Key conforming to the ++ // "log.file.path_resolved" semantic conventions. It represents the full ++ // path to the file, with symlinks resolved. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/var/lib/docker/uuid.log' ++ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") ++) ++ ++// LogFileName returns an attribute KeyValue conforming to the ++// "log.file.name" semantic conventions. It represents the basename of the ++// file. ++func LogFileName(val string) attribute.KeyValue { ++ return LogFileNameKey.String(val) ++} ++ ++// LogFilePath returns an attribute KeyValue conforming to the ++// "log.file.path" semantic conventions. It represents the full path to the ++// file. ++func LogFilePath(val string) attribute.KeyValue { ++ return LogFilePathKey.String(val) ++} ++ ++// LogFileNameResolved returns an attribute KeyValue conforming to the ++// "log.file.name_resolved" semantic conventions. It represents the basename of ++// the file, with symlinks resolved. ++func LogFileNameResolved(val string) attribute.KeyValue { ++ return LogFileNameResolvedKey.String(val) ++} ++ ++// LogFilePathResolved returns an attribute KeyValue conforming to the ++// "log.file.path_resolved" semantic conventions. It represents the full path ++// to the file, with symlinks resolved. ++func LogFilePathResolved(val string) attribute.KeyValue { ++ return LogFilePathResolvedKey.String(val) ++} ++ ++// Describes JVM memory metric attributes. ++const ( ++ // TypeKey is the attribute Key conforming to the "type" semantic ++ // conventions. It represents the type of memory. ++ // ++ // Type: Enum ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'heap', 'non_heap' ++ TypeKey = attribute.Key("type") ++ ++ // PoolKey is the attribute Key conforming to the "pool" semantic ++ // conventions. It represents the name of the memory pool. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' ++ // Note: Pool names are generally obtained via ++ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). ++ PoolKey = attribute.Key("pool") ++) ++ ++var ( ++ // Heap memory ++ TypeHeap = TypeKey.String("heap") ++ // Non-heap memory ++ TypeNonHeap = TypeKey.String("non_heap") ++) ++ ++// Pool returns an attribute KeyValue conforming to the "pool" semantic ++// conventions. It represents the name of the memory pool. ++func Pool(val string) attribute.KeyValue { ++ return PoolKey.String(val) ++} ++ ++// These attributes may be used to describe the server in a connection-based ++// network interaction where there is one side that initiates the connection ++// (the client is the side that initiates the connection). This covers all TCP ++// network interactions since TCP is connection-based and one side initiates ++// the connection (an exception is made for peer-to-peer communication over TCP ++// where the "user-facing" surface of the protocol / API does not expose a ++// clear notion of client and server). This also covers UDP network ++// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) ++// and DNS. ++const ( ++ // ServerAddressKey is the attribute Key conforming to the "server.address" ++ // semantic conventions. It represents the logical server hostname, matches ++ // server FQDN if available, and IP or socket address if FQDN is not known. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'example.com' ++ ServerAddressKey = attribute.Key("server.address") ++ ++ // ServerPortKey is the attribute Key conforming to the "server.port" ++ // semantic conventions. It represents the logical server port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 80, 8080, 443 ++ ServerPortKey = attribute.Key("server.port") ++ ++ // ServerSocketDomainKey is the attribute Key conforming to the ++ // "server.socket.domain" semantic conventions. It represents the domain ++ // name of an immediate peer. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `server.address`.) ++ // Stability: stable ++ // Examples: 'proxy.example.com' ++ // Note: Typically observed from the client side, and represents a proxy or ++ // other intermediary domain name. ++ ServerSocketDomainKey = attribute.Key("server.socket.domain") ++ ++ // ServerSocketAddressKey is the attribute Key conforming to the ++ // "server.socket.address" semantic conventions. It represents the physical ++ // server IP address or Unix socket address. If set from the client, should ++ // simply use the socket's peer address, and not attempt to find any actual ++ // server IP (i.e., if set from client, this may represent some proxy ++ // server instead of the logical server). ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `server.address`.) ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ ServerSocketAddressKey = attribute.Key("server.socket.address") ++ ++ // ServerSocketPortKey is the attribute Key conforming to the ++ // "server.socket.port" semantic conventions. It represents the physical ++ // server port. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If different than `server.port`.) ++ // Stability: stable ++ // Examples: 16456 ++ ServerSocketPortKey = attribute.Key("server.socket.port") ++) ++ ++// ServerAddress returns an attribute KeyValue conforming to the ++// "server.address" semantic conventions. It represents the logical server ++// hostname, matches server FQDN if available, and IP or socket address if FQDN ++// is not known. ++func ServerAddress(val string) attribute.KeyValue { ++ return ServerAddressKey.String(val) ++} ++ ++// ServerPort returns an attribute KeyValue conforming to the "server.port" ++// semantic conventions. It represents the logical server port number ++func ServerPort(val int) attribute.KeyValue { ++ return ServerPortKey.Int(val) ++} ++ ++// ServerSocketDomain returns an attribute KeyValue conforming to the ++// "server.socket.domain" semantic conventions. It represents the domain name ++// of an immediate peer. ++func ServerSocketDomain(val string) attribute.KeyValue { ++ return ServerSocketDomainKey.String(val) ++} ++ ++// ServerSocketAddress returns an attribute KeyValue conforming to the ++// "server.socket.address" semantic conventions. It represents the physical ++// server IP address or Unix socket address. If set from the client, should ++// simply use the socket's peer address, and not attempt to find any actual ++// server IP (i.e., if set from client, this may represent some proxy server ++// instead of the logical server). ++func ServerSocketAddress(val string) attribute.KeyValue { ++ return ServerSocketAddressKey.String(val) ++} ++ ++// ServerSocketPort returns an attribute KeyValue conforming to the ++// "server.socket.port" semantic conventions. 
It represents the physical server ++// port. ++func ServerSocketPort(val int) attribute.KeyValue { ++ return ServerSocketPortKey.Int(val) ++} ++ ++// These attributes may be used to describe the sender of a network ++// exchange/packet. These should be used when there is no client/server ++// relationship between the two sides, or when that relationship is unknown. ++// This covers low-level network interactions (e.g. packet tracing) where you ++// don't know if there was a connection or which side initiated it. This also ++// covers unidirectional UDP flows and peer-to-peer communication where the ++// "user-facing" surface of the protocol / API does not expose a clear notion ++// of client and server. ++const ( ++ // SourceDomainKey is the attribute Key conforming to the "source.domain" ++ // semantic conventions. It represents the domain name of the source ++ // system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'foo.example.com' ++ // Note: This value may be a host name, a fully qualified domain name, or ++ // another host naming format. ++ SourceDomainKey = attribute.Key("source.domain") ++ ++ // SourceAddressKey is the attribute Key conforming to the "source.address" ++ // semantic conventions. It represents the source address, for example IP ++ // address or Unix socket name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ SourceAddressKey = attribute.Key("source.address") ++ ++ // SourcePortKey is the attribute Key conforming to the "source.port" ++ // semantic conventions. It represents the source port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3389, 2888 ++ SourcePortKey = attribute.Key("source.port") ++) ++ ++// SourceDomain returns an attribute KeyValue conforming to the ++// "source.domain" semantic conventions. It represents the domain name of the ++// source system. ++func SourceDomain(val string) attribute.KeyValue { ++ return SourceDomainKey.String(val) ++} ++ ++// SourceAddress returns an attribute KeyValue conforming to the ++// "source.address" semantic conventions. It represents the source address, for ++// example IP address or Unix socket name. ++func SourceAddress(val string) attribute.KeyValue { ++ return SourceAddressKey.String(val) ++} ++ ++// SourcePort returns an attribute KeyValue conforming to the "source.port" ++// semantic conventions. It represents the source port number ++func SourcePort(val int) attribute.KeyValue { ++ return SourcePortKey.Int(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetworkTransportKey is the attribute Key conforming to the ++ // "network.transport" semantic conventions. It represents the [OSI ++ // Transport Layer](https://osi-model.com/transport-layer/) or ++ // [Inter-process Communication ++ // method](https://en.wikipedia.org/wiki/Inter-process_communication). The ++ // value SHOULD be normalized to lowercase. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tcp', 'udp' ++ NetworkTransportKey = attribute.Key("network.transport") ++ ++ // NetworkTypeKey is the attribute Key conforming to the "network.type" ++ // semantic conventions. It represents the [OSI Network ++ // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The ++ // value SHOULD be normalized to lowercase. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ipv4', 'ipv6' ++ NetworkTypeKey = attribute.Key("network.type") ++ ++ // NetworkProtocolNameKey is the attribute Key conforming to the ++ // "network.protocol.name" semantic conventions. It represents the [OSI ++ // Application Layer](https://osi-model.com/application-layer/) or non-OSI ++ // equivalent. The value SHOULD be normalized to lowercase. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetworkProtocolNameKey = attribute.Key("network.protocol.name") ++ ++ // NetworkProtocolVersionKey is the attribute Key conforming to the ++ // "network.protocol.version" semantic conventions. It represents the ++ // version of the application layer protocol used. See note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3.1.1' ++ // Note: `network.protocol.version` refers to the version of the protocol ++ // used and might be different from the protocol client's version. If the ++ // HTTP client used has a version of `0.27.2`, but sends HTTP version ++ // `1.1`, this attribute should be set to `1.1`. ++ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") ++) ++ ++var ( ++ // TCP ++ NetworkTransportTCP = NetworkTransportKey.String("tcp") ++ // UDP ++ NetworkTransportUDP = NetworkTransportKey.String("udp") ++ // Named or anonymous pipe. See note below ++ NetworkTransportPipe = NetworkTransportKey.String("pipe") ++ // Unix domain socket ++ NetworkTransportUnix = NetworkTransportKey.String("unix") ++) ++ ++var ( ++ // IPv4 ++ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") ++ // IPv6 ++ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") ++) ++ ++// NetworkProtocolName returns an attribute KeyValue conforming to the ++// "network.protocol.name" semantic conventions. It represents the [OSI ++// Application Layer](https://osi-model.com/application-layer/) or non-OSI ++// equivalent. The value SHOULD be normalized to lowercase. ++func NetworkProtocolName(val string) attribute.KeyValue { ++ return NetworkProtocolNameKey.String(val) ++} ++ ++// NetworkProtocolVersion returns an attribute KeyValue conforming to the ++// "network.protocol.version" semantic conventions. It represents the version ++// of the application layer protocol used. See note below. ++func NetworkProtocolVersion(val string) attribute.KeyValue { ++ return NetworkProtocolVersionKey.String(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetworkConnectionTypeKey is the attribute Key conforming to the ++ // "network.connection.type" semantic conventions. It represents the ++ // internet connection type. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'wifi' ++ NetworkConnectionTypeKey = attribute.Key("network.connection.type") ++ ++ // NetworkConnectionSubtypeKey is the attribute Key conforming to the ++ // "network.connection.subtype" semantic conventions. It represents the ++ // this describes more details regarding the connection.type. It may be the ++ // type of cell technology connection, but it could be used for describing ++ // details about a wifi connection. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'LTE' ++ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") ++ ++ // NetworkCarrierNameKey is the attribute Key conforming to the ++ // "network.carrier.name" semantic conventions. It represents the name of ++ // the mobile carrier. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'sprint' ++ NetworkCarrierNameKey = attribute.Key("network.carrier.name") ++ ++ // NetworkCarrierMccKey is the attribute Key conforming to the ++ // "network.carrier.mcc" semantic conventions. It represents the mobile ++ // carrier country code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '310' ++ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") ++ ++ // NetworkCarrierMncKey is the attribute Key conforming to the ++ // "network.carrier.mnc" semantic conventions. It represents the mobile ++ // carrier network code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '001' ++ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") ++ ++ // NetworkCarrierIccKey is the attribute Key conforming to the ++ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++ // alpha-2 2-character country code associated with the mobile carrier ++ // network. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'DE' ++ NetworkCarrierIccKey = attribute.Key("network.carrier.icc") ++) ++ ++var ( ++ // wifi ++ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") ++ // wired ++ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") ++ // cell ++ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") ++ // unavailable ++ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") ++ // unknown ++ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") ++) ++ ++var ( ++ // GPRS ++ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") ++ // EDGE ++ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") ++ // UMTS ++ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") ++ // CDMA ++ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") ++ // EVDO Rel. 0 ++ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") ++ // EVDO Rev. A ++ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") ++ // CDMA2000 1XRTT ++ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") ++ // HSDPA ++ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") ++ // HSUPA ++ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") ++ // HSPA ++ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") ++ // IDEN ++ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") ++ // EVDO Rev. 
B ++ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") ++ // LTE ++ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") ++ // EHRPD ++ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") ++ // HSPAP ++ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") ++ // GSM ++ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") ++ // TD-SCDMA ++ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") ++ // IWLAN ++ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") ++ // 5G NR (New Radio) ++ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") ++ // 5G NRNSA (New Radio Non-Standalone) ++ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") ++ // LTE CA ++ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") ++) ++ ++// NetworkCarrierName returns an attribute KeyValue conforming to the ++// "network.carrier.name" semantic conventions. It represents the name of the ++// mobile carrier. ++func NetworkCarrierName(val string) attribute.KeyValue { ++ return NetworkCarrierNameKey.String(val) ++} ++ ++// NetworkCarrierMcc returns an attribute KeyValue conforming to the ++// "network.carrier.mcc" semantic conventions. It represents the mobile carrier ++// country code. ++func NetworkCarrierMcc(val string) attribute.KeyValue { ++ return NetworkCarrierMccKey.String(val) ++} ++ ++// NetworkCarrierMnc returns an attribute KeyValue conforming to the ++// "network.carrier.mnc" semantic conventions. It represents the mobile carrier ++// network code. ++func NetworkCarrierMnc(val string) attribute.KeyValue { ++ return NetworkCarrierMncKey.String(val) ++} ++ ++// NetworkCarrierIcc returns an attribute KeyValue conforming to the ++// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++// alpha-2 2-character country code associated with the mobile carrier network. ++func NetworkCarrierIcc(val string) attribute.KeyValue { ++ return NetworkCarrierIccKey.String(val) ++} ++ ++// Semantic conventions for HTTP client and server Spans. ++const ( ++ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the ++ // "http.request.method_original" semantic conventions. It represents the ++ // original HTTP method sent by the client in the request line. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's different ++ // than `http.request.method`.) ++ // Stability: stable ++ // Examples: 'GeT', 'ACL', 'foo' ++ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") ++ ++ // HTTPRequestBodySizeKey is the attribute Key conforming to the ++ // "http.request.body.size" semantic conventions. It represents the size of ++ // the request payload body in bytes. This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") ++ ++ // HTTPResponseBodySizeKey is the attribute Key conforming to the ++ // "http.response.body.size" semantic conventions. It represents the size ++ // of the response payload body in bytes. 
This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") ++) ++ ++// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the ++// "http.request.method_original" semantic conventions. It represents the ++// original HTTP method sent by the client in the request line. ++func HTTPRequestMethodOriginal(val string) attribute.KeyValue { ++ return HTTPRequestMethodOriginalKey.String(val) ++} ++ ++// HTTPRequestBodySize returns an attribute KeyValue conforming to the ++// "http.request.body.size" semantic conventions. It represents the size of the ++// request payload body in bytes. This is the number of bytes transferred ++// excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPRequestBodySize(val int) attribute.KeyValue { ++ return HTTPRequestBodySizeKey.Int(val) ++} ++ ++// HTTPResponseBodySize returns an attribute KeyValue conforming to the ++// "http.response.body.size" semantic conventions. It represents the size of ++// the response payload body in bytes. This is the number of bytes transferred ++// excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPResponseBodySize(val int) attribute.KeyValue { ++ return HTTPResponseBodySizeKey.Int(val) ++} ++ ++// Semantic convention describing per-message attributes populated on messaging ++// spans or links. ++const ( ++ // MessagingMessageIDKey is the attribute Key conforming to the ++ // "messaging.message.id" semantic conventions. It represents a value used ++ // by the messaging system as an identifier for the message, represented as ++ // a string. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '452a7c7c7c7048c2f887f61572b18fc2' ++ MessagingMessageIDKey = attribute.Key("messaging.message.id") ++ ++ // MessagingMessageConversationIDKey is the attribute Key conforming to the ++ // "messaging.message.conversation_id" semantic conventions. It represents ++ // the [conversation ID](#conversations) identifying the conversation to ++ // which the message belongs, represented as a string. Sometimes called ++ // "Correlation ID". ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyConversationID' ++ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") ++ ++ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to ++ // the "messaging.message.payload_size_bytes" semantic conventions. It ++ // represents the (uncompressed) size of the message payload in bytes. Also ++ // use this attribute if it is unknown whether the compressed or ++ // uncompressed payload size is reported. 
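A sketch of how an HTTP client instrumentation might use the http.request.*/http.response.* size helpers above; the request and response values are assumed to be in hand, and all names are illustrative:

package example

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// recordBodySizes attaches payload-size attributes to the span covering an
// HTTP round trip, per the v1.21.0 conventions defined in this vendored file.
func recordBodySizes(ctx context.Context, req *http.Request, resp *http.Response) {
	_, span := otel.Tracer("example/httpclient").Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	if req.ContentLength >= 0 {
		span.SetAttributes(semconv.HTTPRequestBodySize(int(req.ContentLength)))
	}
	if resp != nil && resp.ContentLength >= 0 {
		span.SetAttributes(semconv.HTTPResponseBodySize(int(resp.ContentLength)))
	}
	// Conditionally required: only when the wire method differs from the
	// canonical http.request.method.
	if req.Method != "GET" {
		span.SetAttributes(semconv.HTTPRequestMethodOriginal(req.Method))
	}
}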
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2738 ++ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") ++ ++ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key ++ // conforming to the "messaging.message.payload_compressed_size_bytes" ++ // semantic conventions. It represents the compressed size of the message ++ // payload in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2048 ++ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ++) ++ ++// MessagingMessageID returns an attribute KeyValue conforming to the ++// "messaging.message.id" semantic conventions. It represents a value used by ++// the messaging system as an identifier for the message, represented as a ++// string. ++func MessagingMessageID(val string) attribute.KeyValue { ++ return MessagingMessageIDKey.String(val) ++} ++ ++// MessagingMessageConversationID returns an attribute KeyValue conforming ++// to the "messaging.message.conversation_id" semantic conventions. It ++// represents the [conversation ID](#conversations) identifying the ++// conversation to which the message belongs, represented as a string. ++// Sometimes called "Correlation ID". ++func MessagingMessageConversationID(val string) attribute.KeyValue { ++ return MessagingMessageConversationIDKey.String(val) ++} ++ ++// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming ++// to the "messaging.message.payload_size_bytes" semantic conventions. It ++// represents the (uncompressed) size of the message payload in bytes. Also use ++// this attribute if it is unknown whether the compressed or uncompressed ++// payload size is reported. ++func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadSizeBytesKey.Int(val) ++} ++ ++// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue ++// conforming to the "messaging.message.payload_compressed_size_bytes" semantic ++// conventions. It represents the compressed size of the message payload in ++// bytes. ++func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) ++} ++ ++// Semantic convention for attributes that describe messaging destination on ++// broker ++const ( ++ // MessagingDestinationNameKey is the attribute Key conforming to the ++ // "messaging.destination.name" semantic conventions. It represents the ++ // message destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Destination name SHOULD uniquely identify a specific queue, topic ++ // or other entity within the broker. If ++ // the broker does not have such notion, the destination name SHOULD ++ // uniquely identify the broker. ++ MessagingDestinationNameKey = attribute.Key("messaging.destination.name") ++ ++ // MessagingDestinationTemplateKey is the attribute Key conforming to the ++ // "messaging.destination.template" semantic conventions. It represents the ++ // low cardinality representation of the messaging destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Destination names could be constructed from templates. 
An example ++ // would be a destination name involving a user name or product id. ++ // Although the destination name in this case is of high cardinality, the ++ // underlying template is of low cardinality and can be effectively used ++ // for grouping and aggregation. ++ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") ++ ++ // MessagingDestinationTemporaryKey is the attribute Key conforming to the ++ // "messaging.destination.temporary" semantic conventions. It represents a ++ // boolean that is true if the message destination is temporary and might ++ // not exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") ++ ++ // MessagingDestinationAnonymousKey is the attribute Key conforming to the ++ // "messaging.destination.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message destination is anonymous (could be ++ // unnamed or have auto-generated name). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ++) ++ ++// MessagingDestinationName returns an attribute KeyValue conforming to the ++// "messaging.destination.name" semantic conventions. It represents the message ++// destination name ++func MessagingDestinationName(val string) attribute.KeyValue { ++ return MessagingDestinationNameKey.String(val) ++} ++ ++// MessagingDestinationTemplate returns an attribute KeyValue conforming to ++// the "messaging.destination.template" semantic conventions. It represents the ++// low cardinality representation of the messaging destination name ++func MessagingDestinationTemplate(val string) attribute.KeyValue { ++ return MessagingDestinationTemplateKey.String(val) ++} ++ ++// MessagingDestinationTemporary returns an attribute KeyValue conforming to ++// the "messaging.destination.temporary" semantic conventions. It represents a ++// boolean that is true if the message destination is temporary and might not ++// exist anymore after messages are processed. ++func MessagingDestinationTemporary(val bool) attribute.KeyValue { ++ return MessagingDestinationTemporaryKey.Bool(val) ++} ++ ++// MessagingDestinationAnonymous returns an attribute KeyValue conforming to ++// the "messaging.destination.anonymous" semantic conventions. It represents a ++// boolean that is true if the message destination is anonymous (could be ++// unnamed or have auto-generated name). ++func MessagingDestinationAnonymous(val bool) attribute.KeyValue { ++ return MessagingDestinationAnonymousKey.Bool(val) ++} ++ ++// Attributes for RabbitMQ ++const ( ++ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key ++ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++ // conventions. It represents the rabbitMQ message routing key. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If not empty.) ++ // Stability: stable ++ // Examples: 'myKey' ++ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ++) ++ ++// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue ++// conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++// conventions. It represents the rabbitMQ message routing key. 
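A short producer-side sketch using the messaging.message.* and messaging.destination.* helpers above; the queue name, message ID, and payload are hypothetical:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// publish records a producer span annotated with the v1.21.0 messaging
// conventions defined in this vendored file.
func publish(ctx context.Context, msgID string, payload []byte) {
	_, span := otel.Tracer("example/publisher").Start(ctx, "MyQueue publish",
		trace.WithSpanKind(trace.SpanKindProducer))
	defer span.End()

	span.SetAttributes(
		semconv.MessagingDestinationName("MyQueue"),
		semconv.MessagingMessageID(msgID),
		semconv.MessagingMessagePayloadSizeBytes(len(payload)),
		semconv.MessagingDestinationTemporary(false),
	)
}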
++func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { ++ return MessagingRabbitmqDestinationRoutingKeyKey.String(val) ++} ++ ++// Attributes for Apache Kafka ++const ( ++ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the ++ // "messaging.kafka.message.key" semantic conventions. It represents the ++ // message keys in Kafka are used for grouping alike messages to ensure ++ // they're processed on the same partition. They differ from ++ // `messaging.message.id` in that they're not unique. If the key is `null`, ++ // the attribute MUST NOT be set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myKey' ++ // Note: If the key type is not string, it's string representation has to ++ // be supplied for the attribute. If the key has no unambiguous, canonical ++ // string form, don't include its value. ++ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") ++ ++ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the ++ // "messaging.kafka.consumer.group" semantic conventions. It represents the ++ // name of the Kafka Consumer Group that is handling the message. Only ++ // applies to consumers, not producers. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-group' ++ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") ++ ++ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to ++ // the "messaging.kafka.destination.partition" semantic conventions. It ++ // represents the partition the message is sent to. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") ++ ++ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the ++ // "messaging.kafka.message.offset" semantic conventions. It represents the ++ // offset of a record in the corresponding Kafka partition. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") ++ ++ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the ++ // "messaging.kafka.message.tombstone" semantic conventions. It represents ++ // a boolean that is true if the message is a tombstone. ++ // ++ // Type: boolean ++ // RequirementLevel: ConditionallyRequired (If value is `true`. When ++ // missing, the value is assumed to be `false`.) ++ // Stability: stable ++ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ++) ++ ++// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the ++// "messaging.kafka.message.key" semantic conventions. It represents the ++// message keys in Kafka are used for grouping alike messages to ensure they're ++// processed on the same partition. They differ from `messaging.message.id` in ++// that they're not unique. If the key is `null`, the attribute MUST NOT be ++// set. ++func MessagingKafkaMessageKey(val string) attribute.KeyValue { ++ return MessagingKafkaMessageKeyKey.String(val) ++} ++ ++// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to ++// the "messaging.kafka.consumer.group" semantic conventions. It represents the ++// name of the Kafka Consumer Group that is handling the message. Only applies ++// to consumers, not producers. 
++func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { ++ return MessagingKafkaConsumerGroupKey.String(val) ++} ++ ++// MessagingKafkaDestinationPartition returns an attribute KeyValue ++// conforming to the "messaging.kafka.destination.partition" semantic ++// conventions. It represents the partition the message is sent to. ++func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { ++ return MessagingKafkaDestinationPartitionKey.Int(val) ++} ++ ++// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to ++// the "messaging.kafka.message.offset" semantic conventions. It represents the ++// offset of a record in the corresponding Kafka partition. ++func MessagingKafkaMessageOffset(val int) attribute.KeyValue { ++ return MessagingKafkaMessageOffsetKey.Int(val) ++} ++ ++// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming ++// to the "messaging.kafka.message.tombstone" semantic conventions. It ++// represents a boolean that is true if the message is a tombstone. ++func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { ++ return MessagingKafkaMessageTombstoneKey.Bool(val) ++} ++ ++// Attributes for Apache RocketMQ ++const ( ++ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the ++ // "messaging.rocketmq.namespace" semantic conventions. It represents the ++ // namespace of RocketMQ resources, resources in different namespaces are ++ // individual. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myNamespace' ++ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") ++ ++ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_group" semantic conventions. It represents ++ // the name of the RocketMQ producer/consumer group that is handling the ++ // message. The client type is identified by the SpanKind. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myConsumerGroup' ++ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") ++ ++ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delivery_timestamp" ++ // semantic conventions. It represents the timestamp in milliseconds that ++ // the delay message is expected to be delivered to consumer. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delay time level is not specified.) ++ // Stability: stable ++ // Examples: 1665987217045 ++ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") ++ ++ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++ // conventions. It represents the delay time level for delay message, which ++ // determines the message delay time. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delivery timestamp is not specified.) ++ // Stability: stable ++ // Examples: 3 ++ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") ++ ++ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.group" semantic conventions. It represents ++ // the it is essential for FIFO message. 
Messages that belong to the same ++ // message group are always processed one by one within the same consumer ++ // group. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) ++ // Stability: stable ++ // Examples: 'myMessageGroup' ++ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") ++ ++ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.type" semantic conventions. It represents ++ // the type of message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") ++ ++ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.tag" semantic conventions. It represents the ++ // secondary classifier of message besides topic. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tagA' ++ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") ++ ++ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.keys" semantic conventions. It represents ++ // the key(s) of message, another way to mark message besides message id. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'keyA', 'keyB' ++ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") ++ ++ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to ++ // the "messaging.rocketmq.consumption_model" semantic conventions. It ++ // represents the model of message consumption. This only applies to ++ // consumer spans. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ++) ++ ++var ( ++ // Normal message ++ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") ++ // FIFO message ++ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") ++ // Delay message ++ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") ++ // Transaction message ++ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ++) ++ ++var ( ++ // Clustering consumption model ++ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") ++ // Broadcasting consumption model ++ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ++) ++ ++// MessagingRocketmqNamespace returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.namespace" semantic conventions. It represents the ++// namespace of RocketMQ resources, resources in different namespaces are ++// individual. ++func MessagingRocketmqNamespace(val string) attribute.KeyValue { ++ return MessagingRocketmqNamespaceKey.String(val) ++} ++ ++// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.client_group" semantic conventions. It represents ++// the name of the RocketMQ producer/consumer group that is handling the ++// message. The client type is identified by the SpanKind. 
++func MessagingRocketmqClientGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqClientGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic ++// conventions. It represents the timestamp in milliseconds that the delay ++// message is expected to be delivered to consumer. ++func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) ++} ++ ++// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++// conventions. It represents the delay time level for delay message, which ++// determines the message delay time. ++func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) ++} ++ ++// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.group" semantic conventions. It represents ++// the it is essential for FIFO message. Messages that belong to the same ++// message group are always processed one by one within the same consumer ++// group. ++func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.tag" semantic conventions. It represents the ++// secondary classifier of message besides topic. ++func MessagingRocketmqMessageTag(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageTagKey.String(val) ++} ++ ++// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.keys" semantic conventions. It represents ++// the key(s) of message, another way to mark message besides message id. ++func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { ++ return MessagingRocketmqMessageKeysKey.StringSlice(val) ++} ++ ++// Attributes describing URL. ++const ( ++ // URLSchemeKey is the attribute Key conforming to the "url.scheme" ++ // semantic conventions. It represents the [URI ++ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component ++ // identifying the used protocol. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'https', 'ftp', 'telnet' ++ URLSchemeKey = attribute.Key("url.scheme") ++ ++ // URLFullKey is the attribute Key conforming to the "url.full" semantic ++ // conventions. It represents the absolute URL describing a network ++ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', ++ // '//localhost' ++ // Note: For network calls, URL usually has ++ // `scheme://host[:port][path][?query][#fragment]` format, where the ++ // fragment is not transmitted over HTTP, but if it is known, it should be ++ // included nevertheless. ++ // `url.full` MUST NOT contain credentials passed via URL in form of ++ // `https://username:password@www.example.com/`. In such case username and ++ // password should be redacted and attribute's value should be ++ // `https://REDACTED:REDACTED@www.example.com/`. 
++ // `url.full` SHOULD capture the absolute URL when it is available (or can ++ // be reconstructed) and SHOULD NOT be validated or modified except for ++ // sanitizing purposes. ++ URLFullKey = attribute.Key("url.full") ++ ++ // URLPathKey is the attribute Key conforming to the "url.path" semantic ++ // conventions. It represents the [URI ++ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/search' ++ // Note: When missing, the value is assumed to be `/` ++ URLPathKey = attribute.Key("url.path") ++ ++ // URLQueryKey is the attribute Key conforming to the "url.query" semantic ++ // conventions. It represents the [URI ++ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'q=OpenTelemetry' ++ // Note: Sensitive content provided in query string SHOULD be scrubbed when ++ // instrumentations can identify it. ++ URLQueryKey = attribute.Key("url.query") ++ ++ // URLFragmentKey is the attribute Key conforming to the "url.fragment" ++ // semantic conventions. It represents the [URI ++ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'SemConv' ++ URLFragmentKey = attribute.Key("url.fragment") ++) ++ ++// URLScheme returns an attribute KeyValue conforming to the "url.scheme" ++// semantic conventions. It represents the [URI ++// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component ++// identifying the used protocol. ++func URLScheme(val string) attribute.KeyValue { ++ return URLSchemeKey.String(val) ++} ++ ++// URLFull returns an attribute KeyValue conforming to the "url.full" ++// semantic conventions. It represents the absolute URL describing a network ++// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) ++func URLFull(val string) attribute.KeyValue { ++ return URLFullKey.String(val) ++} ++ ++// URLPath returns an attribute KeyValue conforming to the "url.path" ++// semantic conventions. It represents the [URI ++// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component ++func URLPath(val string) attribute.KeyValue { ++ return URLPathKey.String(val) ++} ++ ++// URLQuery returns an attribute KeyValue conforming to the "url.query" ++// semantic conventions. It represents the [URI ++// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component ++func URLQuery(val string) attribute.KeyValue { ++ return URLQueryKey.String(val) ++} ++ ++// URLFragment returns an attribute KeyValue conforming to the ++// "url.fragment" semantic conventions. It represents the [URI ++// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component ++func URLFragment(val string) attribute.KeyValue { ++ return URLFragmentKey.String(val) ++} ++ ++// Describes user-agent attributes. ++const ( ++ // UserAgentOriginalKey is the attribute Key conforming to the ++ // "user_agent.original" semantic conventions. It represents the value of ++ // the [HTTP ++ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++ // header sent by the client. 
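A sketch of decomposing a parsed URL into the url.* attributes above, including the redaction of credentials that the url.full note requires; values and function names are illustrative:

package example

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

// urlAttributes splits a parsed URL into the v1.21.0 url.* attributes defined
// in this vendored file. Credentials are replaced with REDACTED before
// url.full is set, as the convention requires.
func urlAttributes(u *url.URL) []attribute.KeyValue {
	redacted := *u
	if redacted.User != nil {
		redacted.User = url.UserPassword("REDACTED", "REDACTED")
	}
	return []attribute.KeyValue{
		semconv.URLScheme(u.Scheme),
		semconv.URLFull(redacted.String()),
		semconv.URLPath(u.Path),
		semconv.URLQuery(u.RawQuery),
	}
}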
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' ++ UserAgentOriginalKey = attribute.Key("user_agent.original") ++) ++ ++// UserAgentOriginal returns an attribute KeyValue conforming to the ++// "user_agent.original" semantic conventions. It represents the value of the ++// [HTTP ++// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++// header sent by the client. ++func UserAgentOriginal(val string) attribute.KeyValue { ++ return UserAgentOriginalKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +new file mode 100644 +index 00000000000..7cf424855e9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package semconv implements OpenTelemetry semantic conventions. ++// ++// OpenTelemetry semantic conventions are agreed standardized naming ++// patterns for OpenTelemetry things. This package represents the conventions ++// as of the v1.21.0 version of the OpenTelemetry specification. ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go +new file mode 100644 +index 00000000000..30ae34fe478 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go +@@ -0,0 +1,199 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// This semantic convention defines the attributes used to represent a feature ++// flag evaluation as an event. ++const ( ++ // FeatureFlagKeyKey is the attribute Key conforming to the ++ // "feature_flag.key" semantic conventions. It represents the unique ++ // identifier of the feature flag. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'logo-color' ++ FeatureFlagKeyKey = attribute.Key("feature_flag.key") ++ ++ // FeatureFlagProviderNameKey is the attribute Key conforming to the ++ // "feature_flag.provider_name" semantic conventions. 
It represents the ++ // name of the service provider that performs the flag evaluation. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'Flag Manager' ++ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") ++ ++ // FeatureFlagVariantKey is the attribute Key conforming to the ++ // "feature_flag.variant" semantic conventions. It represents the sHOULD be ++ // a semantic identifier for a value. If one is unavailable, a stringified ++ // version of the value can be used. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'red', 'true', 'on' ++ // Note: A semantic identifier, commonly referred to as a variant, provides ++ // a means ++ // for referring to a value without including the value itself. This can ++ // provide additional context for understanding the meaning behind a value. ++ // For example, the variant `red` maybe be used for the value `#c05543`. ++ // ++ // A stringified version of the value can be used in situations where a ++ // semantic identifier is unavailable. String representation of the value ++ // should be determined by the implementer. ++ FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ++) ++ ++// FeatureFlagKey returns an attribute KeyValue conforming to the ++// "feature_flag.key" semantic conventions. It represents the unique identifier ++// of the feature flag. ++func FeatureFlagKey(val string) attribute.KeyValue { ++ return FeatureFlagKeyKey.String(val) ++} ++ ++// FeatureFlagProviderName returns an attribute KeyValue conforming to the ++// "feature_flag.provider_name" semantic conventions. It represents the name of ++// the service provider that performs the flag evaluation. ++func FeatureFlagProviderName(val string) attribute.KeyValue { ++ return FeatureFlagProviderNameKey.String(val) ++} ++ ++// FeatureFlagVariant returns an attribute KeyValue conforming to the ++// "feature_flag.variant" semantic conventions. It represents the sHOULD be a ++// semantic identifier for a value. If one is unavailable, a stringified ++// version of the value can be used. ++func FeatureFlagVariant(val string) attribute.KeyValue { ++ return FeatureFlagVariantKey.String(val) ++} ++ ++// RPC received/sent message. ++const ( ++ // MessageTypeKey is the attribute Key conforming to the "message.type" ++ // semantic conventions. It represents the whether this is a received or ++ // sent message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageTypeKey = attribute.Key("message.type") ++ ++ // MessageIDKey is the attribute Key conforming to the "message.id" ++ // semantic conventions. It represents the mUST be calculated as two ++ // different counters starting from `1` one for sent messages and one for ++ // received message. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This way we guarantee that the values will be consistent between ++ // different implementations. ++ MessageIDKey = attribute.Key("message.id") ++ ++ // MessageCompressedSizeKey is the attribute Key conforming to the ++ // "message.compressed_size" semantic conventions. It represents the ++ // compressed size of the message in bytes. 
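The feature-flag attributes above are intended to be attached to a span event describing a flag evaluation. A minimal sketch, assuming the evaluation result is already known (the sample values come from the examples in the comments above):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation emits a span event describing a feature-flag
// evaluation using the v1.21.0 conventions defined in this vendored file.
func recordFlagEvaluation(span trace.Span, key, provider, variant string) {
	// "feature_flag" is the event name used by the upstream semantic
	// conventions for flag-evaluation events.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey(key),               // e.g. "logo-color"
		semconv.FeatureFlagProviderName(provider), // e.g. "Flag Manager"
		semconv.FeatureFlagVariant(variant),       // e.g. "red"
	))
}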
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageCompressedSizeKey = attribute.Key("message.compressed_size") ++ ++ // MessageUncompressedSizeKey is the attribute Key conforming to the ++ // "message.uncompressed_size" semantic conventions. It represents the ++ // uncompressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ++) ++ ++var ( ++ // sent ++ MessageTypeSent = MessageTypeKey.String("SENT") ++ // received ++ MessageTypeReceived = MessageTypeKey.String("RECEIVED") ++) ++ ++// MessageID returns an attribute KeyValue conforming to the "message.id" ++// semantic conventions. It represents the mUST be calculated as two different ++// counters starting from `1` one for sent messages and one for received ++// message. ++func MessageID(val int) attribute.KeyValue { ++ return MessageIDKey.Int(val) ++} ++ ++// MessageCompressedSize returns an attribute KeyValue conforming to the ++// "message.compressed_size" semantic conventions. It represents the compressed ++// size of the message in bytes. ++func MessageCompressedSize(val int) attribute.KeyValue { ++ return MessageCompressedSizeKey.Int(val) ++} ++ ++// MessageUncompressedSize returns an attribute KeyValue conforming to the ++// "message.uncompressed_size" semantic conventions. It represents the ++// uncompressed size of the message in bytes. ++func MessageUncompressedSize(val int) attribute.KeyValue { ++ return MessageUncompressedSizeKey.Int(val) ++} ++ ++// The attributes used to report a single exception associated with a span. ++const ( ++ // ExceptionEscapedKey is the attribute Key conforming to the ++ // "exception.escaped" semantic conventions. It represents the sHOULD be ++ // set to true if the exception event is recorded at a point where it is ++ // known that the exception is escaping the scope of the span. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: An exception is considered to have escaped (or left) the scope of ++ // a span, ++ // if that span is ended while the exception is still logically "in ++ // flight". ++ // This may be actually "in flight" in some languages (e.g. if the ++ // exception ++ // is passed to a Context manager's `__exit__` method in Python) but will ++ // usually be caught at the point of recording the exception in most ++ // languages. ++ // ++ // It is usually not possible to determine at the point where an exception ++ // is thrown ++ // whether it will escape the scope of a span. ++ // However, it is trivial to know that an exception ++ // will escape, if one checks for an active exception just before ending ++ // the span, ++ // as done in the [example above](#recording-an-exception). ++ // ++ // It follows that an exception may still escape the scope of the span ++ // even if the `exception.escaped` attribute was not set or set to false, ++ // since the event might have been recorded at a time where it was not ++ // clear whether the exception will escape. ++ ExceptionEscapedKey = attribute.Key("exception.escaped") ++) ++ ++// ExceptionEscaped returns an attribute KeyValue conforming to the ++// "exception.escaped" semantic conventions. It represents the sHOULD be set to ++// true if the exception event is recorded at a point where it is known that ++// the exception is escaping the scope of the span. 
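A sketch of the pattern the exception.escaped note describes: recording the error immediately before the span ends, at which point it is known that the exception is leaving the span's scope. The span and error are assumed to exist already:

package example

import (
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// endWithError is called just before a span is ended while an error is still
// "in flight", so exception.escaped can be set to true per the note above.
func endWithError(span trace.Span, err error) {
	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	span.SetStatus(codes.Error, err.Error())
	span.End()
}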
++func ExceptionEscaped(val bool) attribute.KeyValue { ++ return ExceptionEscapedKey.Bool(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go +new file mode 100644 +index 00000000000..93d3c1760c9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++const ( ++ // ExceptionEventName is the name of the Span event representing an exception. ++ ExceptionEventName = "exception" ++) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go +new file mode 100644 +index 00000000000..b6d8935cf97 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go +@@ -0,0 +1,2310 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The web browser in which the application represented by the resource is ++// running. The `browser.*` attributes MUST be used only for resources that ++// represent applications running in a web browser (regardless of whether ++// running on a mobile or desktop device). ++const ( ++ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" ++ // semantic conventions. It represents the array of brand name and version ++ // separated by a space ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.brands`). ++ BrowserBrandsKey = attribute.Key("browser.brands") ++ ++ // BrowserPlatformKey is the attribute Key conforming to the ++ // "browser.platform" semantic conventions. 
It represents the platform on ++ // which the browser is running ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Windows', 'macOS', 'Android' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.platform`). If unavailable, the legacy ++ // `navigator.platform` API SHOULD NOT be used instead and this attribute ++ // SHOULD be left unset in order for the values to be consistent. ++ // The list of possible values is defined in the [W3C User-Agent Client ++ // Hints ++ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). ++ // Note that some (but not all) of these values can overlap with values in ++ // the [`os.type` and `os.name` attributes](./os.md). However, for ++ // consistency, the values in the `browser.platform` attribute should ++ // capture the exact value that the user agent provides. ++ BrowserPlatformKey = attribute.Key("browser.platform") ++ ++ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" ++ // semantic conventions. It represents a boolean that is true if the ++ // browser is running on a mobile device ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.mobile`). If unavailable, this attribute ++ // SHOULD be left unset. ++ BrowserMobileKey = attribute.Key("browser.mobile") ++ ++ // BrowserLanguageKey is the attribute Key conforming to the ++ // "browser.language" semantic conventions. It represents the preferred ++ // language of the user using the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'en', 'en-US', 'fr', 'fr-FR' ++ // Note: This value is intended to be taken from the Navigator API ++ // `navigator.language`. ++ BrowserLanguageKey = attribute.Key("browser.language") ++) ++ ++// BrowserBrands returns an attribute KeyValue conforming to the ++// "browser.brands" semantic conventions. It represents the array of brand name ++// and version separated by a space ++func BrowserBrands(val ...string) attribute.KeyValue { ++ return BrowserBrandsKey.StringSlice(val) ++} ++ ++// BrowserPlatform returns an attribute KeyValue conforming to the ++// "browser.platform" semantic conventions. It represents the platform on which ++// the browser is running ++func BrowserPlatform(val string) attribute.KeyValue { ++ return BrowserPlatformKey.String(val) ++} ++ ++// BrowserMobile returns an attribute KeyValue conforming to the ++// "browser.mobile" semantic conventions. It represents a boolean that is true ++// if the browser is running on a mobile device ++func BrowserMobile(val bool) attribute.KeyValue { ++ return BrowserMobileKey.Bool(val) ++} ++ ++// BrowserLanguage returns an attribute KeyValue conforming to the ++// "browser.language" semantic conventions. It represents the preferred ++// language of the user using the browser ++func BrowserLanguage(val string) attribute.KeyValue { ++ return BrowserLanguageKey.String(val) ++} ++ ++// A cloud environment (e.g. GCP, Azure, AWS) ++const ( ++ // CloudProviderKey is the attribute Key conforming to the "cloud.provider" ++ // semantic conventions. It represents the name of the cloud provider. 
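A sketch of assembling the browser.* resource attributes above into an SDK Resource, e.g. for an application built for the browser; the values are illustrative and resource.NewSchemaless comes from the OpenTelemetry Go SDK:

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

// browserResource builds a Resource describing the browser the application
// runs in, using the v1.21.0 browser.* conventions defined in this file.
func browserResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
		semconv.BrowserPlatform("macOS"),
		semconv.BrowserMobile(false),
		semconv.BrowserLanguage("en-US"),
	)
}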
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ CloudProviderKey = attribute.Key("cloud.provider") ++ ++ // CloudAccountIDKey is the attribute Key conforming to the ++ // "cloud.account.id" semantic conventions. It represents the cloud account ++ // ID the resource is assigned to. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '111111111111', 'opentelemetry' ++ CloudAccountIDKey = attribute.Key("cloud.account.id") ++ ++ // CloudRegionKey is the attribute Key conforming to the "cloud.region" ++ // semantic conventions. It represents the geographical region the resource ++ // is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-central1', 'us-east-1' ++ // Note: Refer to your provider's docs to see the available regions, for ++ // example [Alibaba Cloud ++ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS ++ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), ++ // [Azure ++ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), ++ // [Google Cloud regions](https://cloud.google.com/about/locations), or ++ // [Tencent Cloud ++ // regions](https://www.tencentcloud.com/document/product/213/6091). ++ CloudRegionKey = attribute.Key("cloud.region") ++ ++ // CloudResourceIDKey is the attribute Key conforming to the ++ // "cloud.resource_id" semantic conventions. It represents the cloud ++ // provider-specific native identifier of the monitored cloud resource ++ // (e.g. an ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) ++ // on AWS, a [fully qualified resource ++ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // on Azure, a [full resource ++ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) ++ // on GCP) ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', ++ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', ++ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' ++ // Note: On some cloud providers, it may not be possible to determine the ++ // full ID at startup, ++ // so it may be necessary to set `cloud.resource_id` as a span attribute ++ // instead. ++ // ++ // The exact value to use for `cloud.resource_id` depends on the cloud ++ // provider. ++ // The following well-known definitions MUST be used if you set this ++ // attribute and they apply: ++ // ++ // * **AWS Lambda:** The function ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). ++ // Take care not to use the "invoked ARN" directly but replace any ++ // [alias ++ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) ++ // with the resolved function version, as the same runtime instance may ++ // be invokable with ++ // multiple different aliases. ++ // * **GCP:** The [URI of the ++ // resource](https://cloud.google.com/iam/docs/full-resource-names) ++ // * **Azure:** The [Fully Qualified Resource ++ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // of the invoked function, ++ // *not* the function app, having the form ++ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider. ++ CloudResourceIDKey = attribute.Key("cloud.resource_id") ++ ++ // CloudAvailabilityZoneKey is the attribute Key conforming to the ++ // "cloud.availability_zone" semantic conventions. It represents the cloud ++ // regions often have multiple, isolated locations known as zones to ++ // increase availability. Availability zone represents the zone where the ++ // resource is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-east-1c' ++ // Note: Availability zones are called "zones" on Alibaba Cloud and Google ++ // Cloud. ++ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") ++ ++ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" ++ // semantic conventions. It represents the cloud platform in use. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The prefix of the service SHOULD match the one specified in ++ // `cloud.provider`. ++ CloudPlatformKey = attribute.Key("cloud.platform") ++) ++ ++var ( ++ // Alibaba Cloud ++ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ CloudProviderAWS = CloudProviderKey.String("aws") ++ // Microsoft Azure ++ CloudProviderAzure = CloudProviderKey.String("azure") ++ // Google Cloud Platform ++ CloudProviderGCP = CloudProviderKey.String("gcp") ++ // Heroku Platform as a Service ++ CloudProviderHeroku = CloudProviderKey.String("heroku") ++ // IBM Cloud ++ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") ++ // Tencent Cloud ++ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ++) ++ ++var ( ++ // Alibaba Cloud Elastic Compute Service ++ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") ++ // Alibaba Cloud Function Compute ++ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") ++ // Red Hat OpenShift on Alibaba Cloud ++ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") ++ // AWS Elastic Compute Cloud ++ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") ++ // AWS Elastic Container Service ++ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") ++ // AWS Elastic Kubernetes Service ++ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") ++ // AWS Lambda ++ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") ++ // AWS Elastic Beanstalk ++ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") ++ // AWS App Runner ++ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") ++ // Red Hat OpenShift on AWS (ROSA) ++ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") ++ // Azure Virtual Machines ++ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") ++ // Azure Container Instances ++ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") ++ // Azure Kubernetes Service ++ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") ++ // Azure Functions ++ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") ++ // Azure App Service ++ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") ++ // Azure Red Hat OpenShift ++ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") ++ // Google Bare Metal Solution (BMS) ++ 
CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") ++ // Google Cloud Compute Engine (GCE) ++ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") ++ // Google Cloud Run ++ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") ++ // Google Cloud Kubernetes Engine (GKE) ++ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") ++ // Google Cloud Functions (GCF) ++ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") ++ // Google Cloud App Engine (GAE) ++ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ++ // Red Hat OpenShift on Google Cloud ++ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") ++ // Red Hat OpenShift on IBM Cloud ++ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") ++ // Tencent Cloud Cloud Virtual Machine (CVM) ++ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") ++ // Tencent Cloud Elastic Kubernetes Service (EKS) ++ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") ++ // Tencent Cloud Serverless Cloud Function (SCF) ++ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ++) ++ ++// CloudAccountID returns an attribute KeyValue conforming to the ++// "cloud.account.id" semantic conventions. It represents the cloud account ID ++// the resource is assigned to. ++func CloudAccountID(val string) attribute.KeyValue { ++ return CloudAccountIDKey.String(val) ++} ++ ++// CloudRegion returns an attribute KeyValue conforming to the ++// "cloud.region" semantic conventions. It represents the geographical region ++// the resource is running. ++func CloudRegion(val string) attribute.KeyValue { ++ return CloudRegionKey.String(val) ++} ++ ++// CloudResourceID returns an attribute KeyValue conforming to the ++// "cloud.resource_id" semantic conventions. It represents the cloud ++// provider-specific native identifier of the monitored cloud resource (e.g. an ++// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) ++// on AWS, a [fully qualified resource ++// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++// on Azure, a [full resource ++// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) ++// on GCP) ++func CloudResourceID(val string) attribute.KeyValue { ++ return CloudResourceIDKey.String(val) ++} ++ ++// CloudAvailabilityZone returns an attribute KeyValue conforming to the ++// "cloud.availability_zone" semantic conventions. It represents the cloud ++// regions often have multiple, isolated locations known as zones to increase ++// availability. Availability zone represents the zone where the resource is ++// running. ++func CloudAvailabilityZone(val string) attribute.KeyValue { ++ return CloudAvailabilityZoneKey.String(val) ++} ++ ++// Resources used by AWS Elastic Container Service (ECS). ++const ( ++ // AWSECSContainerARNKey is the attribute Key conforming to the ++ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++ // Resource Name (ARN) of an [ECS container ++ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' ++ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") ++ ++ // AWSECSClusterARNKey is the attribute Key conforming to the ++ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an ++ // [ECS ++ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") ++ ++ // AWSECSLaunchtypeKey is the attribute Key conforming to the ++ // "aws.ecs.launchtype" semantic conventions. It represents the [launch ++ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) ++ // for an ECS task. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") ++ ++ // AWSECSTaskARNKey is the attribute Key conforming to the ++ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an ++ // [ECS task ++ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") ++ ++ // AWSECSTaskFamilyKey is the attribute Key conforming to the ++ // "aws.ecs.task.family" semantic conventions. It represents the task ++ // definition family this task definition is a member of. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-family' ++ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") ++ ++ // AWSECSTaskRevisionKey is the attribute Key conforming to the ++ // "aws.ecs.task.revision" semantic conventions. It represents the revision ++ // for this task definition. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '8', '26' ++ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ++) ++ ++var ( ++ // ec2 ++ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") ++ // fargate ++ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ++) ++ ++// AWSECSContainerARN returns an attribute KeyValue conforming to the ++// "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++// Resource Name (ARN) of an [ECS container ++// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++func AWSECSContainerARN(val string) attribute.KeyValue { ++ return AWSECSContainerARNKey.String(val) ++} ++ ++// AWSECSClusterARN returns an attribute KeyValue conforming to the ++// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS ++// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++func AWSECSClusterARN(val string) attribute.KeyValue { ++ return AWSECSClusterARNKey.String(val) ++} ++ ++// AWSECSTaskARN returns an attribute KeyValue conforming to the ++// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS ++// task ++// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
++func AWSECSTaskARN(val string) attribute.KeyValue { ++ return AWSECSTaskARNKey.String(val) ++} ++ ++// AWSECSTaskFamily returns an attribute KeyValue conforming to the ++// "aws.ecs.task.family" semantic conventions. It represents the task ++// definition family this task definition is a member of. ++func AWSECSTaskFamily(val string) attribute.KeyValue { ++ return AWSECSTaskFamilyKey.String(val) ++} ++ ++// AWSECSTaskRevision returns an attribute KeyValue conforming to the ++// "aws.ecs.task.revision" semantic conventions. It represents the revision for ++// this task definition. ++func AWSECSTaskRevision(val string) attribute.KeyValue { ++ return AWSECSTaskRevisionKey.String(val) ++} ++ ++// Resources used by AWS Elastic Kubernetes Service (EKS). ++const ( ++ // AWSEKSClusterARNKey is the attribute Key conforming to the ++ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an ++ // EKS cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ++) ++ ++// AWSEKSClusterARN returns an attribute KeyValue conforming to the ++// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS ++// cluster. ++func AWSEKSClusterARN(val string) attribute.KeyValue { ++ return AWSEKSClusterARNKey.String(val) ++} ++ ++// Resources specific to Amazon Web Services. ++const ( ++ // AWSLogGroupNamesKey is the attribute Key conforming to the ++ // "aws.log.group.names" semantic conventions. It represents the name(s) of ++ // the AWS log group(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/aws/lambda/my-function', 'opentelemetry-service' ++ // Note: Multiple log groups must be supported for cases like ++ // multi-container applications, where a single application has sidecar ++ // containers, and each write to their own log group. ++ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") ++ ++ // AWSLogGroupARNsKey is the attribute Key conforming to the ++ // "aws.log.group.arns" semantic conventions. It represents the Amazon ++ // Resource Name(s) (ARN) of the AWS log group(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' ++ // Note: See the [log group ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") ++ ++ // AWSLogStreamNamesKey is the attribute Key conforming to the ++ // "aws.log.stream.names" semantic conventions. It represents the name(s) ++ // of the AWS log stream(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") ++ ++ // AWSLogStreamARNsKey is the attribute Key conforming to the ++ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of ++ // the AWS log stream(s). 
++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ // Note: See the [log stream ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ // One log group can contain several log streams, so these ARNs necessarily ++ // identify both a log group and a log stream. ++ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ++) ++ ++// AWSLogGroupNames returns an attribute KeyValue conforming to the ++// "aws.log.group.names" semantic conventions. It represents the name(s) of the ++// AWS log group(s) an application is writing to. ++func AWSLogGroupNames(val ...string) attribute.KeyValue { ++ return AWSLogGroupNamesKey.StringSlice(val) ++} ++ ++// AWSLogGroupARNs returns an attribute KeyValue conforming to the ++// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource ++// Name(s) (ARN) of the AWS log group(s). ++func AWSLogGroupARNs(val ...string) attribute.KeyValue { ++ return AWSLogGroupARNsKey.StringSlice(val) ++} ++ ++// AWSLogStreamNames returns an attribute KeyValue conforming to the ++// "aws.log.stream.names" semantic conventions. It represents the name(s) of ++// the AWS log stream(s) an application is writing to. ++func AWSLogStreamNames(val ...string) attribute.KeyValue { ++ return AWSLogStreamNamesKey.StringSlice(val) ++} ++ ++// AWSLogStreamARNs returns an attribute KeyValue conforming to the ++// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the ++// AWS log stream(s). ++func AWSLogStreamARNs(val ...string) attribute.KeyValue { ++ return AWSLogStreamARNsKey.StringSlice(val) ++} ++ ++// Resource used by Google Cloud Run. ++const ( ++ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the ++ // "gcp.cloud_run.job.execution" semantic conventions. It represents the ++ // name of the Cloud Run ++ // [execution](https://cloud.google.com/run/docs/managing/job-executions) ++ // being run for the Job, as set by the ++ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++ // environment variable. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'job-name-xxxx', 'sample-job-mdw84' ++ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") ++ ++ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the ++ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the ++ // index for a task within an execution as provided by the ++ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++ // environment variable. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 1 ++ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") ++) ++ ++// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the ++// "gcp.cloud_run.job.execution" semantic conventions. It represents the name ++// of the Cloud Run ++// [execution](https://cloud.google.com/run/docs/managing/job-executions) being ++// run for the Job, as set by the ++// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++// environment variable. 
++func GCPCloudRunJobExecution(val string) attribute.KeyValue { ++ return GCPCloudRunJobExecutionKey.String(val) ++} ++ ++// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the ++// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index ++// for a task within an execution as provided by the ++// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++// environment variable. ++func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { ++ return GCPCloudRunJobTaskIndexKey.Int(val) ++} ++ ++// Resources used by Google Compute Engine (GCE). ++const ( ++ // GCPGceInstanceNameKey is the attribute Key conforming to the ++ // "gcp.gce.instance.name" semantic conventions. It represents the instance ++ // name of a GCE instance. This is the value provided by `host.name`, the ++ // visible name of the instance in the Cloud Console UI, and the prefix for ++ // the default hostname of the instance as defined by the [default internal ++ // DNS ++ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'instance-1', 'my-vm-name' ++ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") ++ ++ // GCPGceInstanceHostnameKey is the attribute Key conforming to the ++ // "gcp.gce.instance.hostname" semantic conventions. It represents the ++ // hostname of a GCE instance. This is the full value of the default or ++ // [custom ++ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-host1234.example.com', ++ // 'sample-vm.us-west1-b.c.my-project.internal' ++ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") ++) ++ ++// GCPGceInstanceName returns an attribute KeyValue conforming to the ++// "gcp.gce.instance.name" semantic conventions. It represents the instance ++// name of a GCE instance. This is the value provided by `host.name`, the ++// visible name of the instance in the Cloud Console UI, and the prefix for the ++// default hostname of the instance as defined by the [default internal DNS ++// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). ++func GCPGceInstanceName(val string) attribute.KeyValue { ++ return GCPGceInstanceNameKey.String(val) ++} ++ ++// GCPGceInstanceHostname returns an attribute KeyValue conforming to the ++// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname ++// of a GCE instance. This is the full value of the default or [custom ++// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). ++func GCPGceInstanceHostname(val string) attribute.KeyValue { ++ return GCPGceInstanceHostnameKey.String(val) ++} ++ ++// Heroku dyno metadata ++const ( ++ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the ++ // "heroku.release.creation_timestamp" semantic conventions. It represents ++ // the time and date the release was created ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2022-10-23T18:00:42Z' ++ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") ++ ++ // HerokuReleaseCommitKey is the attribute Key conforming to the ++ // "heroku.release.commit" semantic conventions. 
It represents the commit ++ // hash for the current release ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' ++ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") ++ ++ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" ++ // semantic conventions. It represents the unique identifier for the ++ // application ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' ++ HerokuAppIDKey = attribute.Key("heroku.app.id") ++) ++ ++// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming ++// to the "heroku.release.creation_timestamp" semantic conventions. It ++// represents the time and date the release was created ++func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { ++ return HerokuReleaseCreationTimestampKey.String(val) ++} ++ ++// HerokuReleaseCommit returns an attribute KeyValue conforming to the ++// "heroku.release.commit" semantic conventions. It represents the commit hash ++// for the current release ++func HerokuReleaseCommit(val string) attribute.KeyValue { ++ return HerokuReleaseCommitKey.String(val) ++} ++ ++// HerokuAppID returns an attribute KeyValue conforming to the ++// "heroku.app.id" semantic conventions. It represents the unique identifier ++// for the application ++func HerokuAppID(val string) attribute.KeyValue { ++ return HerokuAppIDKey.String(val) ++} ++ ++// A container instance. ++const ( ++ // ContainerNameKey is the attribute Key conforming to the "container.name" ++ // semantic conventions. It represents the container name used by container ++ // runtime. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-autoconf' ++ ContainerNameKey = attribute.Key("container.name") ++ ++ // ContainerIDKey is the attribute Key conforming to the "container.id" ++ // semantic conventions. It represents the container ID. Usually a UUID, as ++ // for example used to [identify Docker ++ // containers](https://docs.docker.com/engine/reference/run/#container-identification). ++ // The UUID might be abbreviated. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'a3bf90e006b2' ++ ContainerIDKey = attribute.Key("container.id") ++ ++ // ContainerRuntimeKey is the attribute Key conforming to the ++ // "container.runtime" semantic conventions. It represents the container ++ // runtime managing this container. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'docker', 'containerd', 'rkt' ++ ContainerRuntimeKey = attribute.Key("container.runtime") ++ ++ // ContainerImageNameKey is the attribute Key conforming to the ++ // "container.image.name" semantic conventions. It represents the name of ++ // the image the container was built on. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'gcr.io/opentelemetry/operator' ++ ContainerImageNameKey = attribute.Key("container.image.name") ++ ++ // ContainerImageTagKey is the attribute Key conforming to the ++ // "container.image.tag" semantic conventions. It represents the container ++ // image tag. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ ContainerImageTagKey = attribute.Key("container.image.tag") ++ ++ // ContainerImageIDKey is the attribute Key conforming to the ++ // "container.image.id" semantic conventions. It represents the runtime ++ // specific image identifier. Usually a hash algorithm followed by a UUID. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' ++ // Note: Docker defines a sha256 of the image id; `container.image.id` ++ // corresponds to the `Image` field from the Docker container inspect ++ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) ++ // endpoint. ++ // K8S defines a link to the container registry repository with digest ++ // `"imageID": "registry.azurecr.io ++ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. ++ // OCI defines a digest of manifest. ++ ContainerImageIDKey = attribute.Key("container.image.id") ++ ++ // ContainerCommandKey is the attribute Key conforming to the ++ // "container.command" semantic conventions. It represents the command used ++ // to run the container (i.e. the command name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol' ++ // Note: If using embedded credentials or sensitive data, it is recommended ++ // to remove them to prevent potential leakage. ++ ContainerCommandKey = attribute.Key("container.command") ++ ++ // ContainerCommandLineKey is the attribute Key conforming to the ++ // "container.command_line" semantic conventions. It represents the full ++ // command run by the container as a single string representing the full ++ // command. [2] ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol --config config.yaml' ++ ContainerCommandLineKey = attribute.Key("container.command_line") ++ ++ // ContainerCommandArgsKey is the attribute Key conforming to the ++ // "container.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) run by the ++ // container. [2] ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol, --config, config.yaml' ++ ContainerCommandArgsKey = attribute.Key("container.command_args") ++) ++ ++// ContainerName returns an attribute KeyValue conforming to the ++// "container.name" semantic conventions. It represents the container name used ++// by container runtime. ++func ContainerName(val string) attribute.KeyValue { ++ return ContainerNameKey.String(val) ++} ++ ++// ContainerID returns an attribute KeyValue conforming to the ++// "container.id" semantic conventions. It represents the container ID. Usually ++// a UUID, as for example used to [identify Docker ++// containers](https://docs.docker.com/engine/reference/run/#container-identification). ++// The UUID might be abbreviated. ++func ContainerID(val string) attribute.KeyValue { ++ return ContainerIDKey.String(val) ++} ++ ++// ContainerRuntime returns an attribute KeyValue conforming to the ++// "container.runtime" semantic conventions. It represents the container ++// runtime managing this container. 
++func ContainerRuntime(val string) attribute.KeyValue { ++ return ContainerRuntimeKey.String(val) ++} ++ ++// ContainerImageName returns an attribute KeyValue conforming to the ++// "container.image.name" semantic conventions. It represents the name of the ++// image the container was built on. ++func ContainerImageName(val string) attribute.KeyValue { ++ return ContainerImageNameKey.String(val) ++} ++ ++// ContainerImageTag returns an attribute KeyValue conforming to the ++// "container.image.tag" semantic conventions. It represents the container ++// image tag. ++func ContainerImageTag(val string) attribute.KeyValue { ++ return ContainerImageTagKey.String(val) ++} ++ ++// ContainerImageID returns an attribute KeyValue conforming to the ++// "container.image.id" semantic conventions. It represents the runtime ++// specific image identifier. Usually a hash algorithm followed by a UUID. ++func ContainerImageID(val string) attribute.KeyValue { ++ return ContainerImageIDKey.String(val) ++} ++ ++// ContainerCommand returns an attribute KeyValue conforming to the ++// "container.command" semantic conventions. It represents the command used to ++// run the container (i.e. the command name). ++func ContainerCommand(val string) attribute.KeyValue { ++ return ContainerCommandKey.String(val) ++} ++ ++// ContainerCommandLine returns an attribute KeyValue conforming to the ++// "container.command_line" semantic conventions. It represents the full ++// command run by the container as a single string representing the full ++// command. [2] ++func ContainerCommandLine(val string) attribute.KeyValue { ++ return ContainerCommandLineKey.String(val) ++} ++ ++// ContainerCommandArgs returns an attribute KeyValue conforming to the ++// "container.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) run by the ++// container. [2] ++func ContainerCommandArgs(val ...string) attribute.KeyValue { ++ return ContainerCommandArgsKey.StringSlice(val) ++} ++ ++// The software deployment. ++const ( ++ // DeploymentEnvironmentKey is the attribute Key conforming to the ++ // "deployment.environment" semantic conventions. It represents the name of ++ // the [deployment ++ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++ // deployment tier). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'staging', 'production' ++ DeploymentEnvironmentKey = attribute.Key("deployment.environment") ++) ++ ++// DeploymentEnvironment returns an attribute KeyValue conforming to the ++// "deployment.environment" semantic conventions. It represents the name of the ++// [deployment ++// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++// deployment tier). ++func DeploymentEnvironment(val string) attribute.KeyValue { ++ return DeploymentEnvironmentKey.String(val) ++} ++ ++// The device on which the process represented by this resource is running. ++const ( ++ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic ++ // conventions. It represents a unique identifier representing the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' ++ // Note: The device identifier MUST only be defined using the values ++ // outlined below. This value is not an advertising identifier and MUST NOT ++ // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal ++ // to the [vendor ++ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). ++ // On Android (Java or Kotlin), this value MUST be equal to the Firebase ++ // Installation ID or a globally unique UUID which is persisted across ++ // sessions in your application. More information can be found ++ // [here](https://developer.android.com/training/articles/user-data-ids) on ++ // best practices and exact implementation details. Caution should be taken ++ // when storing personal data or anything which can identify a user. GDPR ++ // and data protection laws may apply, ensure you do your own due ++ // diligence. ++ DeviceIDKey = attribute.Key("device.id") ++ ++ // DeviceModelIdentifierKey is the attribute Key conforming to the ++ // "device.model.identifier" semantic conventions. It represents the model ++ // identifier for the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone3,4', 'SM-G920F' ++ // Note: It's recommended this value represents a machine readable version ++ // of the model identifier rather than the market or consumer-friendly name ++ // of the device. ++ DeviceModelIdentifierKey = attribute.Key("device.model.identifier") ++ ++ // DeviceModelNameKey is the attribute Key conforming to the ++ // "device.model.name" semantic conventions. It represents the marketing ++ // name for the device model ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' ++ // Note: It's recommended this value represents a human readable version of ++ // the device model rather than a machine readable alternative. ++ DeviceModelNameKey = attribute.Key("device.model.name") ++ ++ // DeviceManufacturerKey is the attribute Key conforming to the ++ // "device.manufacturer" semantic conventions. It represents the name of ++ // the device manufacturer ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Apple', 'Samsung' ++ // Note: The Android OS provides this field via ++ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). ++ // iOS apps SHOULD hardcode the value `Apple`. ++ DeviceManufacturerKey = attribute.Key("device.manufacturer") ++) ++ ++// DeviceID returns an attribute KeyValue conforming to the "device.id" ++// semantic conventions. It represents a unique identifier representing the ++// device ++func DeviceID(val string) attribute.KeyValue { ++ return DeviceIDKey.String(val) ++} ++ ++// DeviceModelIdentifier returns an attribute KeyValue conforming to the ++// "device.model.identifier" semantic conventions. It represents the model ++// identifier for the device ++func DeviceModelIdentifier(val string) attribute.KeyValue { ++ return DeviceModelIdentifierKey.String(val) ++} ++ ++// DeviceModelName returns an attribute KeyValue conforming to the ++// "device.model.name" semantic conventions. It represents the marketing name ++// for the device model ++func DeviceModelName(val string) attribute.KeyValue { ++ return DeviceModelNameKey.String(val) ++} ++ ++// DeviceManufacturer returns an attribute KeyValue conforming to the ++// "device.manufacturer" semantic conventions. It represents the name of the ++// device manufacturer ++func DeviceManufacturer(val string) attribute.KeyValue { ++ return DeviceManufacturerKey.String(val) ++} ++ ++// A serverless instance. 
++const ( ++ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic ++ // conventions. It represents the name of the single function that this ++ // runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function', 'myazurefunctionapp/some-function-name' ++ // Note: This is the name of the function as configured/deployed on the ++ // FaaS ++ // platform and is usually different from the name of the callback ++ // function (which may be stored in the ++ // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) ++ // span attributes). ++ // ++ // For some cloud providers, the above definition is ambiguous. The ++ // following ++ // definition of function name MUST be used for this attribute ++ // (and consequently the span name) for the listed cloud ++ // providers/products: ++ // ++ // * **Azure:** The full name `/`, i.e., function app name ++ // followed by a forward slash followed by the function name (this form ++ // can also be seen in the resource JSON for the function). ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider (see also the `cloud.resource_id` attribute). ++ FaaSNameKey = attribute.Key("faas.name") ++ ++ // FaaSVersionKey is the attribute Key conforming to the "faas.version" ++ // semantic conventions. It represents the immutable version of the ++ // function being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '26', 'pinkfroid-00002' ++ // Note: Depending on the cloud provider and platform, use: ++ // ++ // * **AWS Lambda:** The [function ++ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) ++ // (an integer represented as a decimal string). ++ // * **Google Cloud Run (Services):** The ++ // [revision](https://cloud.google.com/run/docs/managing/revisions) ++ // (i.e., the function name plus the revision suffix). ++ // * **Google Cloud Functions:** The value of the ++ // [`K_REVISION` environment ++ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). ++ // * **Azure Functions:** Not applicable. Do not set this attribute. ++ FaaSVersionKey = attribute.Key("faas.version") ++ ++ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" ++ // semantic conventions. It represents the execution environment ID as a ++ // string, that will be potentially reused for other invocations to the ++ // same function/function version. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' ++ // Note: * **AWS Lambda:** Use the (full) log stream name. ++ FaaSInstanceKey = attribute.Key("faas.instance") ++ ++ // FaaSMaxMemoryKey is the attribute Key conforming to the ++ // "faas.max_memory" semantic conventions. It represents the amount of ++ // memory available to the serverless function converted to Bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 134217728 ++ // Note: It's recommended to set this attribute since e.g. too little ++ // memory can easily stop a Java AWS Lambda function from working ++ // correctly. 
On AWS Lambda, the environment variable ++ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must ++ // be multiplied by 1,048,576). ++ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ++) ++ ++// FaaSName returns an attribute KeyValue conforming to the "faas.name" ++// semantic conventions. It represents the name of the single function that ++// this runtime instance executes. ++func FaaSName(val string) attribute.KeyValue { ++ return FaaSNameKey.String(val) ++} ++ ++// FaaSVersion returns an attribute KeyValue conforming to the ++// "faas.version" semantic conventions. It represents the immutable version of ++// the function being executed. ++func FaaSVersion(val string) attribute.KeyValue { ++ return FaaSVersionKey.String(val) ++} ++ ++// FaaSInstance returns an attribute KeyValue conforming to the ++// "faas.instance" semantic conventions. It represents the execution ++// environment ID as a string, that will be potentially reused for other ++// invocations to the same function/function version. ++func FaaSInstance(val string) attribute.KeyValue { ++ return FaaSInstanceKey.String(val) ++} ++ ++// FaaSMaxMemory returns an attribute KeyValue conforming to the ++// "faas.max_memory" semantic conventions. It represents the amount of memory ++// available to the serverless function converted to Bytes. ++func FaaSMaxMemory(val int) attribute.KeyValue { ++ return FaaSMaxMemoryKey.Int(val) ++} ++ ++// A host is defined as a computing instance. For example, physical servers, ++// virtual machines, switches or disk array. ++const ( ++ // HostIDKey is the attribute Key conforming to the "host.id" semantic ++ // conventions. It represents the unique host ID. For Cloud, this must be ++ // the instance_id assigned by the cloud provider. For non-containerized ++ // systems, this should be the `machine-id`. See the table below for the ++ // sources to use to determine the `machine-id` based on operating system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'fdbf79e8af94cb7f9e8df36789187052' ++ HostIDKey = attribute.Key("host.id") ++ ++ // HostNameKey is the attribute Key conforming to the "host.name" semantic ++ // conventions. It represents the name of the host. On Unix systems, it may ++ // contain what the hostname command returns, or the fully qualified ++ // hostname, or another name specified by the user. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-test' ++ HostNameKey = attribute.Key("host.name") ++ ++ // HostTypeKey is the attribute Key conforming to the "host.type" semantic ++ // conventions. It represents the type of host. For Cloud, this must be the ++ // machine type. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'n1-standard-1' ++ HostTypeKey = attribute.Key("host.type") ++ ++ // HostArchKey is the attribute Key conforming to the "host.arch" semantic ++ // conventions. It represents the CPU architecture the host system is ++ // running on. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ HostArchKey = attribute.Key("host.arch") ++ ++ // HostImageNameKey is the attribute Key conforming to the ++ // "host.image.name" semantic conventions. It represents the name of the VM ++ // image or OS install the host was instantiated from. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' ++ HostImageNameKey = attribute.Key("host.image.name") ++ ++ // HostImageIDKey is the attribute Key conforming to the "host.image.id" ++ // semantic conventions. It represents the vM image ID or host OS image ID. ++ // For Cloud, this value is from the provider. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ami-07b06b442921831e5' ++ HostImageIDKey = attribute.Key("host.image.id") ++ ++ // HostImageVersionKey is the attribute Key conforming to the ++ // "host.image.version" semantic conventions. It represents the version ++ // string of the VM image or host OS as defined in [Version ++ // Attributes](README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ HostImageVersionKey = attribute.Key("host.image.version") ++) ++ ++var ( ++ // AMD64 ++ HostArchAMD64 = HostArchKey.String("amd64") ++ // ARM32 ++ HostArchARM32 = HostArchKey.String("arm32") ++ // ARM64 ++ HostArchARM64 = HostArchKey.String("arm64") ++ // Itanium ++ HostArchIA64 = HostArchKey.String("ia64") ++ // 32-bit PowerPC ++ HostArchPPC32 = HostArchKey.String("ppc32") ++ // 64-bit PowerPC ++ HostArchPPC64 = HostArchKey.String("ppc64") ++ // IBM z/Architecture ++ HostArchS390x = HostArchKey.String("s390x") ++ // 32-bit x86 ++ HostArchX86 = HostArchKey.String("x86") ++) ++ ++// HostID returns an attribute KeyValue conforming to the "host.id" semantic ++// conventions. It represents the unique host ID. For Cloud, this must be the ++// instance_id assigned by the cloud provider. For non-containerized systems, ++// this should be the `machine-id`. See the table below for the sources to use ++// to determine the `machine-id` based on operating system. ++func HostID(val string) attribute.KeyValue { ++ return HostIDKey.String(val) ++} ++ ++// HostName returns an attribute KeyValue conforming to the "host.name" ++// semantic conventions. It represents the name of the host. On Unix systems, ++// it may contain what the hostname command returns, or the fully qualified ++// hostname, or another name specified by the user. ++func HostName(val string) attribute.KeyValue { ++ return HostNameKey.String(val) ++} ++ ++// HostType returns an attribute KeyValue conforming to the "host.type" ++// semantic conventions. It represents the type of host. For Cloud, this must ++// be the machine type. ++func HostType(val string) attribute.KeyValue { ++ return HostTypeKey.String(val) ++} ++ ++// HostImageName returns an attribute KeyValue conforming to the ++// "host.image.name" semantic conventions. It represents the name of the VM ++// image or OS install the host was instantiated from. ++func HostImageName(val string) attribute.KeyValue { ++ return HostImageNameKey.String(val) ++} ++ ++// HostImageID returns an attribute KeyValue conforming to the ++// "host.image.id" semantic conventions. It represents the vM image ID or host ++// OS image ID. For Cloud, this value is from the provider. ++func HostImageID(val string) attribute.KeyValue { ++ return HostImageIDKey.String(val) ++} ++ ++// HostImageVersion returns an attribute KeyValue conforming to the ++// "host.image.version" semantic conventions. It represents the version string ++// of the VM image or host OS as defined in [Version ++// Attributes](README.md#version-attributes). 
++func HostImageVersion(val string) attribute.KeyValue { ++ return HostImageVersionKey.String(val) ++} ++ ++// A Kubernetes Cluster. ++const ( ++ // K8SClusterNameKey is the attribute Key conforming to the ++ // "k8s.cluster.name" semantic conventions. It represents the name of the ++ // cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-cluster' ++ K8SClusterNameKey = attribute.Key("k8s.cluster.name") ++ ++ // K8SClusterUIDKey is the attribute Key conforming to the ++ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for ++ // the cluster, set to the UID of the `kube-system` namespace. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' ++ // Note: K8S does not have support for obtaining a cluster ID. If this is ++ // ever ++ // added, we will recommend collecting the `k8s.cluster.uid` through the ++ // official APIs. In the meantime, we are able to use the `uid` of the ++ // `kube-system` namespace as a proxy for cluster ID. Read on for the ++ // rationale. ++ // ++ // Every object created in a K8S cluster is assigned a distinct UID. The ++ // `kube-system` namespace is used by Kubernetes itself and will exist ++ // for the lifetime of the cluster. Using the `uid` of the `kube-system` ++ // namespace is a reasonable proxy for the K8S ClusterID as it will only ++ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are ++ // UUIDs as standardized by ++ // [ISO/IEC 9834-8 and ITU-T ++ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). ++ // Which states: ++ // ++ // > If generated according to one of the mechanisms defined in Rec. ++ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be ++ // different from all other UUIDs generated before 3603 A.D., or is ++ // extremely likely to be different (depending on the mechanism chosen). ++ // ++ // Therefore, UIDs between clusters should be extremely unlikely to ++ // conflict. ++ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") ++) ++ ++// K8SClusterName returns an attribute KeyValue conforming to the ++// "k8s.cluster.name" semantic conventions. It represents the name of the ++// cluster. ++func K8SClusterName(val string) attribute.KeyValue { ++ return K8SClusterNameKey.String(val) ++} ++ ++// K8SClusterUID returns an attribute KeyValue conforming to the ++// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the ++// cluster, set to the UID of the `kube-system` namespace. ++func K8SClusterUID(val string) attribute.KeyValue { ++ return K8SClusterUIDKey.String(val) ++} ++ ++// A Kubernetes Node object. ++const ( ++ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" ++ // semantic conventions. It represents the name of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'node-1' ++ K8SNodeNameKey = attribute.Key("k8s.node.name") ++ ++ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" ++ // semantic conventions. It represents the UID of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' ++ K8SNodeUIDKey = attribute.Key("k8s.node.uid") ++) ++ ++// K8SNodeName returns an attribute KeyValue conforming to the ++// "k8s.node.name" semantic conventions. It represents the name of the Node. 
++func K8SNodeName(val string) attribute.KeyValue { ++ return K8SNodeNameKey.String(val) ++} ++ ++// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" ++// semantic conventions. It represents the UID of the Node. ++func K8SNodeUID(val string) attribute.KeyValue { ++ return K8SNodeUIDKey.String(val) ++} ++ ++// A Kubernetes Namespace. ++const ( ++ // K8SNamespaceNameKey is the attribute Key conforming to the ++ // "k8s.namespace.name" semantic conventions. It represents the name of the ++ // namespace that the pod is running in. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'default' ++ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ++) ++ ++// K8SNamespaceName returns an attribute KeyValue conforming to the ++// "k8s.namespace.name" semantic conventions. It represents the name of the ++// namespace that the pod is running in. ++func K8SNamespaceName(val string) attribute.KeyValue { ++ return K8SNamespaceNameKey.String(val) ++} ++ ++// A Kubernetes Pod object. ++const ( ++ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" ++ // semantic conventions. It represents the UID of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SPodUIDKey = attribute.Key("k8s.pod.uid") ++ ++ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" ++ // semantic conventions. It represents the name of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-pod-autoconf' ++ K8SPodNameKey = attribute.Key("k8s.pod.name") ++) ++ ++// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" ++// semantic conventions. It represents the UID of the Pod. ++func K8SPodUID(val string) attribute.KeyValue { ++ return K8SPodUIDKey.String(val) ++} ++ ++// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" ++// semantic conventions. It represents the name of the Pod. ++func K8SPodName(val string) attribute.KeyValue { ++ return K8SPodNameKey.String(val) ++} ++ ++// A container in a ++// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). ++const ( ++ // K8SContainerNameKey is the attribute Key conforming to the ++ // "k8s.container.name" semantic conventions. It represents the name of the ++ // Container from Pod specification, must be unique within a Pod. Container ++ // runtime usually uses different globally unique name (`container.name`). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'redis' ++ K8SContainerNameKey = attribute.Key("k8s.container.name") ++ ++ // K8SContainerRestartCountKey is the attribute Key conforming to the ++ // "k8s.container.restart_count" semantic conventions. It represents the ++ // number of times the container was restarted. This attribute can be used ++ // to identify a particular container (running or stopped) within a ++ // container spec. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ++) ++ ++// K8SContainerName returns an attribute KeyValue conforming to the ++// "k8s.container.name" semantic conventions. It represents the name of the ++// Container from Pod specification, must be unique within a Pod. 
Container ++// runtime usually uses different globally unique name (`container.name`). ++func K8SContainerName(val string) attribute.KeyValue { ++ return K8SContainerNameKey.String(val) ++} ++ ++// K8SContainerRestartCount returns an attribute KeyValue conforming to the ++// "k8s.container.restart_count" semantic conventions. It represents the number ++// of times the container was restarted. This attribute can be used to identify ++// a particular container (running or stopped) within a container spec. ++func K8SContainerRestartCount(val int) attribute.KeyValue { ++ return K8SContainerRestartCountKey.Int(val) ++} ++ ++// A Kubernetes ReplicaSet object. ++const ( ++ // K8SReplicaSetUIDKey is the attribute Key conforming to the ++ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++ // ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") ++ ++ // K8SReplicaSetNameKey is the attribute Key conforming to the ++ // "k8s.replicaset.name" semantic conventions. It represents the name of ++ // the ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ++) ++ ++// K8SReplicaSetUID returns an attribute KeyValue conforming to the ++// "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++// ReplicaSet. ++func K8SReplicaSetUID(val string) attribute.KeyValue { ++ return K8SReplicaSetUIDKey.String(val) ++} ++ ++// K8SReplicaSetName returns an attribute KeyValue conforming to the ++// "k8s.replicaset.name" semantic conventions. It represents the name of the ++// ReplicaSet. ++func K8SReplicaSetName(val string) attribute.KeyValue { ++ return K8SReplicaSetNameKey.String(val) ++} ++ ++// A Kubernetes Deployment object. ++const ( ++ // K8SDeploymentUIDKey is the attribute Key conforming to the ++ // "k8s.deployment.uid" semantic conventions. It represents the UID of the ++ // Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") ++ ++ // K8SDeploymentNameKey is the attribute Key conforming to the ++ // "k8s.deployment.name" semantic conventions. It represents the name of ++ // the Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ++) ++ ++// K8SDeploymentUID returns an attribute KeyValue conforming to the ++// "k8s.deployment.uid" semantic conventions. It represents the UID of the ++// Deployment. ++func K8SDeploymentUID(val string) attribute.KeyValue { ++ return K8SDeploymentUIDKey.String(val) ++} ++ ++// K8SDeploymentName returns an attribute KeyValue conforming to the ++// "k8s.deployment.name" semantic conventions. It represents the name of the ++// Deployment. ++func K8SDeploymentName(val string) attribute.KeyValue { ++ return K8SDeploymentNameKey.String(val) ++} ++ ++// A Kubernetes StatefulSet object. ++const ( ++ // K8SStatefulSetUIDKey is the attribute Key conforming to the ++ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++ // StatefulSet. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") ++ ++ // K8SStatefulSetNameKey is the attribute Key conforming to the ++ // "k8s.statefulset.name" semantic conventions. It represents the name of ++ // the StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ++) ++ ++// K8SStatefulSetUID returns an attribute KeyValue conforming to the ++// "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++// StatefulSet. ++func K8SStatefulSetUID(val string) attribute.KeyValue { ++ return K8SStatefulSetUIDKey.String(val) ++} ++ ++// K8SStatefulSetName returns an attribute KeyValue conforming to the ++// "k8s.statefulset.name" semantic conventions. It represents the name of the ++// StatefulSet. ++func K8SStatefulSetName(val string) attribute.KeyValue { ++ return K8SStatefulSetNameKey.String(val) ++} ++ ++// A Kubernetes DaemonSet object. ++const ( ++ // K8SDaemonSetUIDKey is the attribute Key conforming to the ++ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") ++ ++ // K8SDaemonSetNameKey is the attribute Key conforming to the ++ // "k8s.daemonset.name" semantic conventions. It represents the name of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ++) ++ ++// K8SDaemonSetUID returns an attribute KeyValue conforming to the ++// "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++// DaemonSet. ++func K8SDaemonSetUID(val string) attribute.KeyValue { ++ return K8SDaemonSetUIDKey.String(val) ++} ++ ++// K8SDaemonSetName returns an attribute KeyValue conforming to the ++// "k8s.daemonset.name" semantic conventions. It represents the name of the ++// DaemonSet. ++func K8SDaemonSetName(val string) attribute.KeyValue { ++ return K8SDaemonSetNameKey.String(val) ++} ++ ++// A Kubernetes Job object. ++const ( ++ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" ++ // semantic conventions. It represents the UID of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SJobUIDKey = attribute.Key("k8s.job.uid") ++ ++ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" ++ // semantic conventions. It represents the name of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SJobNameKey = attribute.Key("k8s.job.name") ++) ++ ++// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" ++// semantic conventions. It represents the UID of the Job. ++func K8SJobUID(val string) attribute.KeyValue { ++ return K8SJobUIDKey.String(val) ++} ++ ++// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" ++// semantic conventions. It represents the name of the Job. 
++func K8SJobName(val string) attribute.KeyValue { ++ return K8SJobNameKey.String(val) ++} ++ ++// A Kubernetes CronJob object. ++const ( ++ // K8SCronJobUIDKey is the attribute Key conforming to the ++ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") ++ ++ // K8SCronJobNameKey is the attribute Key conforming to the ++ // "k8s.cronjob.name" semantic conventions. It represents the name of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ++) ++ ++// K8SCronJobUID returns an attribute KeyValue conforming to the ++// "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++// CronJob. ++func K8SCronJobUID(val string) attribute.KeyValue { ++ return K8SCronJobUIDKey.String(val) ++} ++ ++// K8SCronJobName returns an attribute KeyValue conforming to the ++// "k8s.cronjob.name" semantic conventions. It represents the name of the ++// CronJob. ++func K8SCronJobName(val string) attribute.KeyValue { ++ return K8SCronJobNameKey.String(val) ++} ++ ++// The operating system (OS) on which the process represented by this resource ++// is running. ++const ( ++ // OSTypeKey is the attribute Key conforming to the "os.type" semantic ++ // conventions. It represents the operating system type. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ OSTypeKey = attribute.Key("os.type") ++ ++ // OSDescriptionKey is the attribute Key conforming to the "os.description" ++ // semantic conventions. It represents the human readable (not intended to ++ // be parsed) OS version information, like e.g. reported by `ver` or ++ // `lsb_release -a` commands. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 ++ // LTS' ++ OSDescriptionKey = attribute.Key("os.description") ++ ++ // OSNameKey is the attribute Key conforming to the "os.name" semantic ++ // conventions. It represents the human readable operating system name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iOS', 'Android', 'Ubuntu' ++ OSNameKey = attribute.Key("os.name") ++ ++ // OSVersionKey is the attribute Key conforming to the "os.version" ++ // semantic conventions. It represents the version string of the operating ++ // system as defined in [Version ++ // Attributes](/docs/resource/README.md#version-attributes). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.2.1', '18.04.1' ++ OSVersionKey = attribute.Key("os.version") ++) ++ ++var ( ++ // Microsoft Windows ++ OSTypeWindows = OSTypeKey.String("windows") ++ // Linux ++ OSTypeLinux = OSTypeKey.String("linux") ++ // Apple Darwin ++ OSTypeDarwin = OSTypeKey.String("darwin") ++ // FreeBSD ++ OSTypeFreeBSD = OSTypeKey.String("freebsd") ++ // NetBSD ++ OSTypeNetBSD = OSTypeKey.String("netbsd") ++ // OpenBSD ++ OSTypeOpenBSD = OSTypeKey.String("openbsd") ++ // DragonFly BSD ++ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") ++ // HP-UX (Hewlett Packard Unix) ++ OSTypeHPUX = OSTypeKey.String("hpux") ++ // AIX (Advanced Interactive eXecutive) ++ OSTypeAIX = OSTypeKey.String("aix") ++ // SunOS, Oracle Solaris ++ OSTypeSolaris = OSTypeKey.String("solaris") ++ // IBM z/OS ++ OSTypeZOS = OSTypeKey.String("z_os") ++) ++ ++// OSDescription returns an attribute KeyValue conforming to the ++// "os.description" semantic conventions. It represents the human readable (not ++// intended to be parsed) OS version information, like e.g. reported by `ver` ++// or `lsb_release -a` commands. ++func OSDescription(val string) attribute.KeyValue { ++ return OSDescriptionKey.String(val) ++} ++ ++// OSName returns an attribute KeyValue conforming to the "os.name" semantic ++// conventions. It represents the human readable operating system name. ++func OSName(val string) attribute.KeyValue { ++ return OSNameKey.String(val) ++} ++ ++// OSVersion returns an attribute KeyValue conforming to the "os.version" ++// semantic conventions. It represents the version string of the operating ++// system as defined in [Version ++// Attributes](/docs/resource/README.md#version-attributes). ++func OSVersion(val string) attribute.KeyValue { ++ return OSVersionKey.String(val) ++} ++ ++// An operating system process. ++const ( ++ // ProcessPIDKey is the attribute Key conforming to the "process.pid" ++ // semantic conventions. It represents the process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1234 ++ ProcessPIDKey = attribute.Key("process.pid") ++ ++ // ProcessParentPIDKey is the attribute Key conforming to the ++ // "process.parent_pid" semantic conventions. It represents the parent ++ // Process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 111 ++ ProcessParentPIDKey = attribute.Key("process.parent_pid") ++ ++ // ProcessExecutableNameKey is the attribute Key conforming to the ++ // "process.executable.name" semantic conventions. It represents the name ++ // of the process executable. On Linux based systems, can be set to the ++ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name ++ // of `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'otelcol' ++ ProcessExecutableNameKey = attribute.Key("process.executable.name") ++ ++ // ProcessExecutablePathKey is the attribute Key conforming to the ++ // "process.executable.path" semantic conventions. It represents the full ++ // path to the process executable. On Linux based systems, can be set to ++ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of ++ // `GetProcessImageFileNameW`. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: '/usr/bin/cmd/otelcol' ++ ProcessExecutablePathKey = attribute.Key("process.executable.path") ++ ++ // ProcessCommandKey is the attribute Key conforming to the ++ // "process.command" semantic conventions. It represents the command used ++ // to launch the process (i.e. the command name). On Linux based systems, ++ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can ++ // be set to the first parameter extracted from `GetCommandLineW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otelcol' ++ ProcessCommandKey = attribute.Key("process.command") ++ ++ // ProcessCommandLineKey is the attribute Key conforming to the ++ // "process.command_line" semantic conventions. It represents the full ++ // command used to launch the process as a single string representing the ++ // full command. On Windows, can be set to the result of `GetCommandLineW`. ++ // Do not set this if you have to assemble it just for monitoring; use ++ // `process.command_args` instead. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ++ ProcessCommandLineKey = attribute.Key("process.command_line") ++ ++ // ProcessCommandArgsKey is the attribute Key conforming to the ++ // "process.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) as received ++ // by the process. On Linux-based systems (and some other Unixoid systems ++ // supporting procfs), can be set according to the list of null-delimited ++ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++ // this would be the full argv vector passed to `main`. ++ // ++ // Type: string[] ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otecol', '--config=config.yaml' ++ ProcessCommandArgsKey = attribute.Key("process.command_args") ++ ++ // ProcessOwnerKey is the attribute Key conforming to the "process.owner" ++ // semantic conventions. It represents the username of the user that owns ++ // the process. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'root' ++ ProcessOwnerKey = attribute.Key("process.owner") ++) ++ ++// ProcessPID returns an attribute KeyValue conforming to the "process.pid" ++// semantic conventions. It represents the process identifier (PID). ++func ProcessPID(val int) attribute.KeyValue { ++ return ProcessPIDKey.Int(val) ++} ++ ++// ProcessParentPID returns an attribute KeyValue conforming to the ++// "process.parent_pid" semantic conventions. It represents the parent Process ++// identifier (PID). ++func ProcessParentPID(val int) attribute.KeyValue { ++ return ProcessParentPIDKey.Int(val) ++} ++ ++// ProcessExecutableName returns an attribute KeyValue conforming to the ++// "process.executable.name" semantic conventions. It represents the name of ++// the process executable. On Linux based systems, can be set to the `Name` in ++// `proc/[pid]/status`. On Windows, can be set to the base name of ++// `GetProcessImageFileNameW`. 
++func ProcessExecutableName(val string) attribute.KeyValue { ++ return ProcessExecutableNameKey.String(val) ++} ++ ++// ProcessExecutablePath returns an attribute KeyValue conforming to the ++// "process.executable.path" semantic conventions. It represents the full path ++// to the process executable. On Linux based systems, can be set to the target ++// of `proc/[pid]/exe`. On Windows, can be set to the result of ++// `GetProcessImageFileNameW`. ++func ProcessExecutablePath(val string) attribute.KeyValue { ++ return ProcessExecutablePathKey.String(val) ++} ++ ++// ProcessCommand returns an attribute KeyValue conforming to the ++// "process.command" semantic conventions. It represents the command used to ++// launch the process (i.e. the command name). On Linux based systems, can be ++// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to ++// the first parameter extracted from `GetCommandLineW`. ++func ProcessCommand(val string) attribute.KeyValue { ++ return ProcessCommandKey.String(val) ++} ++ ++// ProcessCommandLine returns an attribute KeyValue conforming to the ++// "process.command_line" semantic conventions. It represents the full command ++// used to launch the process as a single string representing the full command. ++// On Windows, can be set to the result of `GetCommandLineW`. Do not set this ++// if you have to assemble it just for monitoring; use `process.command_args` ++// instead. ++func ProcessCommandLine(val string) attribute.KeyValue { ++ return ProcessCommandLineKey.String(val) ++} ++ ++// ProcessCommandArgs returns an attribute KeyValue conforming to the ++// "process.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) as received by ++// the process. On Linux-based systems (and some other Unixoid systems ++// supporting procfs), can be set according to the list of null-delimited ++// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++// this would be the full argv vector passed to `main`. ++func ProcessCommandArgs(val ...string) attribute.KeyValue { ++ return ProcessCommandArgsKey.StringSlice(val) ++} ++ ++// ProcessOwner returns an attribute KeyValue conforming to the ++// "process.owner" semantic conventions. It represents the username of the user ++// that owns the process. ++func ProcessOwner(val string) attribute.KeyValue { ++ return ProcessOwnerKey.String(val) ++} ++ ++// The single (language) runtime instance which is monitored. ++const ( ++ // ProcessRuntimeNameKey is the attribute Key conforming to the ++ // "process.runtime.name" semantic conventions. It represents the name of ++ // the runtime of this process. For compiled native binaries, this SHOULD ++ // be the name of the compiler. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'OpenJDK Runtime Environment' ++ ProcessRuntimeNameKey = attribute.Key("process.runtime.name") ++ ++ // ProcessRuntimeVersionKey is the attribute Key conforming to the ++ // "process.runtime.version" semantic conventions. It represents the ++ // version of the runtime of this process, as returned by the runtime ++ // without modification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.0.2' ++ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") ++ ++ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the ++ // "process.runtime.description" semantic conventions. 
It represents an ++ // additional description about the runtime of the process, for example a ++ // specific vendor customization of the runtime environment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ++ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ++) ++ ++// ProcessRuntimeName returns an attribute KeyValue conforming to the ++// "process.runtime.name" semantic conventions. It represents the name of the ++// runtime of this process. For compiled native binaries, this SHOULD be the ++// name of the compiler. ++func ProcessRuntimeName(val string) attribute.KeyValue { ++ return ProcessRuntimeNameKey.String(val) ++} ++ ++// ProcessRuntimeVersion returns an attribute KeyValue conforming to the ++// "process.runtime.version" semantic conventions. It represents the version of ++// the runtime of this process, as returned by the runtime without ++// modification. ++func ProcessRuntimeVersion(val string) attribute.KeyValue { ++ return ProcessRuntimeVersionKey.String(val) ++} ++ ++// ProcessRuntimeDescription returns an attribute KeyValue conforming to the ++// "process.runtime.description" semantic conventions. It represents an ++// additional description about the runtime of the process, for example a ++// specific vendor customization of the runtime environment. ++func ProcessRuntimeDescription(val string) attribute.KeyValue { ++ return ProcessRuntimeDescriptionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNameKey is the attribute Key conforming to the "service.name" ++ // semantic conventions. It represents the logical name of the service. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'shoppingcart' ++ // Note: MUST be the same for all instances of horizontally scaled ++ // services. If the value was not specified, SDKs MUST fallback to ++ // `unknown_service:` concatenated with ++ // [`process.executable.name`](process.md#process), e.g. ++ // `unknown_service:bash`. If `process.executable.name` is not available, ++ // the value MUST be set to `unknown_service`. ++ ServiceNameKey = attribute.Key("service.name") ++ ++ // ServiceVersionKey is the attribute Key conforming to the ++ // "service.version" semantic conventions. It represents the version string ++ // of the service API or implementation. The format is not defined by these ++ // conventions. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2.0.0', 'a01dbef8a' ++ ServiceVersionKey = attribute.Key("service.version") ++) ++ ++// ServiceName returns an attribute KeyValue conforming to the ++// "service.name" semantic conventions. It represents the logical name of the ++// service. ++func ServiceName(val string) attribute.KeyValue { ++ return ServiceNameKey.String(val) ++} ++ ++// ServiceVersion returns an attribute KeyValue conforming to the ++// "service.version" semantic conventions. It represents the version string of ++// the service API or implementation. The format is not defined by these ++// conventions. ++func ServiceVersion(val string) attribute.KeyValue { ++ return ServiceVersionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNamespaceKey is the attribute Key conforming to the ++ // "service.namespace" semantic conventions. It represents a namespace for ++ // `service.name`. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Shop' ++ // Note: A string value having a meaning that helps to distinguish a group ++ // of services, for example the team name that owns a group of services. ++ // `service.name` is expected to be unique within the same namespace. If ++ // `service.namespace` is not specified in the Resource then `service.name` ++ // is expected to be unique for all services that have no explicit ++ // namespace defined (so the empty/unspecified namespace is simply one more ++ // valid namespace). Zero-length namespace string is assumed equal to ++ // unspecified namespace. ++ ServiceNamespaceKey = attribute.Key("service.namespace") ++ ++ // ServiceInstanceIDKey is the attribute Key conforming to the ++ // "service.instance.id" semantic conventions. It represents the string ID ++ // of the service instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-k8s-pod-deployment-1', ++ // '627cc493-f310-47de-96bd-71410b7dec09' ++ // Note: MUST be unique for each instance of the same ++ // `service.namespace,service.name` pair (in other words ++ // `service.namespace,service.name,service.instance.id` triplet MUST be ++ // globally unique). The ID helps to distinguish instances of the same ++ // service that exist at the same time (e.g. instances of a horizontally ++ // scaled service). It is preferable for the ID to be persistent and stay ++ // the same for the lifetime of the service instance, however it is ++ // acceptable that the ID is ephemeral and changes during important ++ // lifetime events for the service (e.g. service restarts). If the service ++ // has no inherent unique ID that can be used as the value of this ++ // attribute it is recommended to generate a random Version 1 or Version 4 ++ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use ++ // Version 5, see RFC 4122 for more recommendations). ++ ServiceInstanceIDKey = attribute.Key("service.instance.id") ++) ++ ++// ServiceNamespace returns an attribute KeyValue conforming to the ++// "service.namespace" semantic conventions. It represents a namespace for ++// `service.name`. ++func ServiceNamespace(val string) attribute.KeyValue { ++ return ServiceNamespaceKey.String(val) ++} ++ ++// ServiceInstanceID returns an attribute KeyValue conforming to the ++// "service.instance.id" semantic conventions. It represents the string ID of ++// the service instance. ++func ServiceInstanceID(val string) attribute.KeyValue { ++ return ServiceInstanceIDKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetrySDKNameKey is the attribute Key conforming to the ++ // "telemetry.sdk.name" semantic conventions. It represents the name of the ++ // telemetry SDK as defined above. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute ++ // to `opentelemetry`. ++ // If another SDK, like a fork or a vendor-provided implementation, is ++ // used, this SDK MUST set the ++ // `telemetry.sdk.name` attribute to the fully-qualified class or module ++ // name of this SDK's main entry point ++ // or another suitable identifier depending on the language. ++ // The identifier `opentelemetry` is reserved and MUST NOT be used in this ++ // case. 
++ // All custom identifiers SHOULD be stable across different versions of an ++ // implementation. ++ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") ++ ++ // TelemetrySDKLanguageKey is the attribute Key conforming to the ++ // "telemetry.sdk.language" semantic conventions. It represents the ++ // language of the telemetry SDK. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") ++ ++ // TelemetrySDKVersionKey is the attribute Key conforming to the ++ // "telemetry.sdk.version" semantic conventions. It represents the version ++ // string of the telemetry SDK. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ++) ++ ++var ( ++ // cpp ++ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") ++ // dotnet ++ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") ++ // erlang ++ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") ++ // go ++ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") ++ // java ++ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") ++ // nodejs ++ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") ++ // php ++ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") ++ // python ++ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") ++ // ruby ++ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") ++ // rust ++ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") ++ // swift ++ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ++ // webjs ++ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ++) ++ ++// TelemetrySDKName returns an attribute KeyValue conforming to the ++// "telemetry.sdk.name" semantic conventions. It represents the name of the ++// telemetry SDK as defined above. ++func TelemetrySDKName(val string) attribute.KeyValue { ++ return TelemetrySDKNameKey.String(val) ++} ++ ++// TelemetrySDKVersion returns an attribute KeyValue conforming to the ++// "telemetry.sdk.version" semantic conventions. It represents the version ++// string of the telemetry SDK. ++func TelemetrySDKVersion(val string) attribute.KeyValue { ++ return TelemetrySDKVersionKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetryAutoVersionKey is the attribute Key conforming to the ++ // "telemetry.auto.version" semantic conventions. It represents the version ++ // string of the auto instrumentation agent, if used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ++) ++ ++// TelemetryAutoVersion returns an attribute KeyValue conforming to the ++// "telemetry.auto.version" semantic conventions. It represents the version ++// string of the auto instrumentation agent, if used. ++func TelemetryAutoVersion(val string) attribute.KeyValue { ++ return TelemetryAutoVersionKey.String(val) ++} ++ ++// Resource describing the packaged software running the application code. Web ++// engines are typically executed using process.runtime. ++const ( ++ // WebEngineNameKey is the attribute Key conforming to the "webengine.name" ++ // semantic conventions. It represents the name of the web engine. 
++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'WildFly' ++ WebEngineNameKey = attribute.Key("webengine.name") ++ ++ // WebEngineVersionKey is the attribute Key conforming to the ++ // "webengine.version" semantic conventions. It represents the version of ++ // the web engine. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '21.0.0' ++ WebEngineVersionKey = attribute.Key("webengine.version") ++ ++ // WebEngineDescriptionKey is the attribute Key conforming to the ++ // "webengine.description" semantic conventions. It represents the ++ // additional description of the web engine (e.g. detailed version and ++ // edition information). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - ++ // 2.2.2.Final' ++ WebEngineDescriptionKey = attribute.Key("webengine.description") ++) ++ ++// WebEngineName returns an attribute KeyValue conforming to the ++// "webengine.name" semantic conventions. It represents the name of the web ++// engine. ++func WebEngineName(val string) attribute.KeyValue { ++ return WebEngineNameKey.String(val) ++} ++ ++// WebEngineVersion returns an attribute KeyValue conforming to the ++// "webengine.version" semantic conventions. It represents the version of the ++// web engine. ++func WebEngineVersion(val string) attribute.KeyValue { ++ return WebEngineVersionKey.String(val) ++} ++ ++// WebEngineDescription returns an attribute KeyValue conforming to the ++// "webengine.description" semantic conventions. It represents the additional ++// description of the web engine (e.g. detailed version and edition ++// information). ++func WebEngineDescription(val string) attribute.KeyValue { ++ return WebEngineDescriptionKey.String(val) ++} ++ ++// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's ++// concepts. ++const ( ++ // OTelScopeNameKey is the attribute Key conforming to the ++ // "otel.scope.name" semantic conventions. It represents the name of the ++ // instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OTelScopeNameKey = attribute.Key("otel.scope.name") ++ ++ // OTelScopeVersionKey is the attribute Key conforming to the ++ // "otel.scope.version" semantic conventions. It represents the version of ++ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0.0' ++ OTelScopeVersionKey = attribute.Key("otel.scope.version") ++) ++ ++// OTelScopeName returns an attribute KeyValue conforming to the ++// "otel.scope.name" semantic conventions. It represents the name of the ++// instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++func OTelScopeName(val string) attribute.KeyValue { ++ return OTelScopeNameKey.String(val) ++} ++ ++// OTelScopeVersion returns an attribute KeyValue conforming to the ++// "otel.scope.version" semantic conventions. It represents the version of the ++// instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++func OTelScopeVersion(val string) attribute.KeyValue { ++ return OTelScopeVersionKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry ++// Scope's concepts. 
++const ( ++ // OTelLibraryNameKey is the attribute Key conforming to the ++ // "otel.library.name" semantic conventions. It represents the deprecated, ++ // use the `otel.scope.name` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OTelLibraryNameKey = attribute.Key("otel.library.name") ++ ++ // OTelLibraryVersionKey is the attribute Key conforming to the ++ // "otel.library.version" semantic conventions. It represents the ++ // deprecated, use the `otel.scope.version` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '1.0.0' ++ OTelLibraryVersionKey = attribute.Key("otel.library.version") ++) ++ ++// OTelLibraryName returns an attribute KeyValue conforming to the ++// "otel.library.name" semantic conventions. It represents the deprecated, use ++// the `otel.scope.name` attribute. ++func OTelLibraryName(val string) attribute.KeyValue { ++ return OTelLibraryNameKey.String(val) ++} ++ ++// OTelLibraryVersion returns an attribute KeyValue conforming to the ++// "otel.library.version" semantic conventions. It represents the deprecated, ++// use the `otel.scope.version` attribute. ++func OTelLibraryVersion(val string) attribute.KeyValue { ++ return OTelLibraryVersionKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go +new file mode 100644 +index 00000000000..66ffd5989f3 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++// SchemaURL is the schema URL that matches the version of the semantic conventions ++// that this package defines. Semconv packages starting from v1.4.0 must declare ++// non-empty schema URL in the form https://opentelemetry.io/schemas/ ++const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go +new file mode 100644 +index 00000000000..b5a91450d42 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go +@@ -0,0 +1,2495 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The shared attributes used to report a single exception associated with a ++// span or log. ++const ( ++ // ExceptionTypeKey is the attribute Key conforming to the "exception.type" ++ // semantic conventions. It represents the type of the exception (its ++ // fully-qualified class name, if applicable). The dynamic type of the ++ // exception should be preferred over the static type in languages that ++ // support it. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'java.net.ConnectException', 'OSError' ++ ExceptionTypeKey = attribute.Key("exception.type") ++ ++ // ExceptionMessageKey is the attribute Key conforming to the ++ // "exception.message" semantic conventions. It represents the exception ++ // message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Division by zero', "Can't convert 'int' object to str ++ // implicitly" ++ ExceptionMessageKey = attribute.Key("exception.message") ++ ++ // ExceptionStacktraceKey is the attribute Key conforming to the ++ // "exception.stacktrace" semantic conventions. It represents a stacktrace ++ // as a string in the natural representation for the language runtime. The ++ // representation is to be determined and documented by each language SIG. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test ++ // exception\\n at ' ++ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' ++ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' ++ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ++ ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ++) ++ ++// ExceptionType returns an attribute KeyValue conforming to the ++// "exception.type" semantic conventions. It represents the type of the ++// exception (its fully-qualified class name, if applicable). The dynamic type ++// of the exception should be preferred over the static type in languages that ++// support it. ++func ExceptionType(val string) attribute.KeyValue { ++ return ExceptionTypeKey.String(val) ++} ++ ++// ExceptionMessage returns an attribute KeyValue conforming to the ++// "exception.message" semantic conventions. It represents the exception ++// message. ++func ExceptionMessage(val string) attribute.KeyValue { ++ return ExceptionMessageKey.String(val) ++} ++ ++// ExceptionStacktrace returns an attribute KeyValue conforming to the ++// "exception.stacktrace" semantic conventions. It represents a stacktrace as a ++// string in the natural representation for the language runtime. The ++// representation is to be determined and documented by each language SIG. ++func ExceptionStacktrace(val string) attribute.KeyValue { ++ return ExceptionStacktraceKey.String(val) ++} ++ ++// Span attributes used by AWS Lambda (in addition to general `faas` ++// attributes). ++const ( ++ // AWSLambdaInvokedARNKey is the attribute Key conforming to the ++ // "aws.lambda.invoked_arn" semantic conventions. It represents the full ++ // invoked ARN as provided on the `Context` passed to the function ++ // (`Lambda-Runtime-Invoked-Function-ARN` header on the ++ // `/runtime/invocation/next` applicable). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' ++ // Note: This may be different from `cloud.resource_id` if an alias is ++ // involved. ++ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ++) ++ ++// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the ++// "aws.lambda.invoked_arn" semantic conventions. It represents the full ++// invoked ARN as provided on the `Context` passed to the function ++// (`Lambda-Runtime-Invoked-Function-ARN` header on the ++// `/runtime/invocation/next` applicable). ++func AWSLambdaInvokedARN(val string) attribute.KeyValue { ++ return AWSLambdaInvokedARNKey.String(val) ++} ++ ++// Attributes for CloudEvents. CloudEvents is a specification on how to define ++// event data in a standard way. These attributes can be attached to spans when ++// performing operations with CloudEvents, regardless of the protocol being ++// used. ++const ( ++ // CloudeventsEventIDKey is the attribute Key conforming to the ++ // "cloudevents.event_id" semantic conventions. It represents the ++ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++ // uniquely identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' ++ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") ++ ++ // CloudeventsEventSourceKey is the attribute Key conforming to the ++ // "cloudevents.event_source" semantic conventions. It represents the ++ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++ // identifies the context in which an event happened. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://github.com/cloudevents', ++ // '/cloudevents/spec/pull/123', 'my-service' ++ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") ++ ++ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the ++ // "cloudevents.event_spec_version" semantic conventions. It represents the ++ // [version of the CloudEvents ++ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++ // which the event uses. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0' ++ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") ++ ++ // CloudeventsEventTypeKey is the attribute Key conforming to the ++ // "cloudevents.event_type" semantic conventions. It represents the ++ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++ // contains a value describing the type of event related to the originating ++ // occurrence. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.github.pull_request.opened', ++ // 'com.example.object.deleted.v2' ++ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") ++ ++ // CloudeventsEventSubjectKey is the attribute Key conforming to the ++ // "cloudevents.event_subject" semantic conventions. It represents the ++ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++ // of the event in the context of the event producer (identified by ++ // source). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mynewfile.jpg' ++ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ++) ++ ++// CloudeventsEventID returns an attribute KeyValue conforming to the ++// "cloudevents.event_id" semantic conventions. It represents the ++// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++// uniquely identifies the event. ++func CloudeventsEventID(val string) attribute.KeyValue { ++ return CloudeventsEventIDKey.String(val) ++} ++ ++// CloudeventsEventSource returns an attribute KeyValue conforming to the ++// "cloudevents.event_source" semantic conventions. It represents the ++// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++// identifies the context in which an event happened. ++func CloudeventsEventSource(val string) attribute.KeyValue { ++ return CloudeventsEventSourceKey.String(val) ++} ++ ++// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to ++// the "cloudevents.event_spec_version" semantic conventions. It represents the ++// [version of the CloudEvents ++// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++// which the event uses. ++func CloudeventsEventSpecVersion(val string) attribute.KeyValue { ++ return CloudeventsEventSpecVersionKey.String(val) ++} ++ ++// CloudeventsEventType returns an attribute KeyValue conforming to the ++// "cloudevents.event_type" semantic conventions. It represents the ++// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++// contains a value describing the type of event related to the originating ++// occurrence. ++func CloudeventsEventType(val string) attribute.KeyValue { ++ return CloudeventsEventTypeKey.String(val) ++} ++ ++// CloudeventsEventSubject returns an attribute KeyValue conforming to the ++// "cloudevents.event_subject" semantic conventions. It represents the ++// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++// of the event in the context of the event producer (identified by source). ++func CloudeventsEventSubject(val string) attribute.KeyValue { ++ return CloudeventsEventSubjectKey.String(val) ++} ++ ++// Semantic conventions for the OpenTracing Shim ++const ( ++ // OpentracingRefTypeKey is the attribute Key conforming to the ++ // "opentracing.ref_type" semantic conventions. It represents the ++ // parent-child Reference type ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The causal relationship between a child Span and a parent Span. ++ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ++) ++ ++var ( ++ // The parent Span depends on the child Span in some capacity ++ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") ++ // The parent Span does not depend in any way on the result of the child Span ++ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ++) ++ ++// The attributes used to perform database client calls. ++const ( ++ // DBSystemKey is the attribute Key conforming to the "db.system" semantic ++ // conventions. It represents an identifier for the database management ++ // system (DBMS) product being used. See below for a list of well-known ++ // identifiers. 
++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ DBSystemKey = attribute.Key("db.system") ++ ++ // DBConnectionStringKey is the attribute Key conforming to the ++ // "db.connection_string" semantic conventions. It represents the ++ // connection string used to connect to the database. It is recommended to ++ // remove embedded credentials. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' ++ DBConnectionStringKey = attribute.Key("db.connection_string") ++ ++ // DBUserKey is the attribute Key conforming to the "db.user" semantic ++ // conventions. It represents the username for accessing the database. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'readonly_user', 'reporting_user' ++ DBUserKey = attribute.Key("db.user") ++ ++ // DBJDBCDriverClassnameKey is the attribute Key conforming to the ++ // "db.jdbc.driver_classname" semantic conventions. It represents the ++ // fully-qualified class name of the [Java Database Connectivity ++ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) ++ // driver used to connect. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'org.postgresql.Driver', ++ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' ++ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") ++ ++ // DBNameKey is the attribute Key conforming to the "db.name" semantic ++ // conventions. It represents the this attribute is used to report the name ++ // of the database being accessed. For commands that switch the database, ++ // this should be set to the target database (even if the command fails). ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable.) ++ // Stability: stable ++ // Examples: 'customers', 'main' ++ // Note: In some SQL databases, the database name to be used is called ++ // "schema name". In case there are multiple layers that could be ++ // considered for database name (e.g. Oracle instance name and schema ++ // name), the database name to be used is the more specific layer (e.g. ++ // Oracle schema name). ++ DBNameKey = attribute.Key("db.name") ++ ++ // DBStatementKey is the attribute Key conforming to the "db.statement" ++ // semantic conventions. It represents the database statement being ++ // executed. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (Should be collected by default only if ++ // there is sanitization that excludes sensitive information.) ++ // Stability: stable ++ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' ++ DBStatementKey = attribute.Key("db.statement") ++ ++ // DBOperationKey is the attribute Key conforming to the "db.operation" ++ // semantic conventions. It represents the name of the operation being ++ // executed, e.g. the [MongoDB command ++ // name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++ // such as `findAndModify`, or the SQL keyword. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If `db.statement` is not ++ // applicable.) ++ // Stability: stable ++ // Examples: 'findAndModify', 'HMSET', 'SELECT' ++ // Note: When setting this to an SQL keyword, it is not recommended to ++ // attempt any client-side parsing of `db.statement` just to get this ++ // property, but it should be set if the operation name is provided by the ++ // library being instrumented. 
If the SQL statement has an ambiguous ++ // operation, or performs more than one operation, this value may be ++ // omitted. ++ DBOperationKey = attribute.Key("db.operation") ++) ++ ++var ( ++ // Some other SQL database. Fallback only. See notes ++ DBSystemOtherSQL = DBSystemKey.String("other_sql") ++ // Microsoft SQL Server ++ DBSystemMSSQL = DBSystemKey.String("mssql") ++ // Microsoft SQL Server Compact ++ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") ++ // MySQL ++ DBSystemMySQL = DBSystemKey.String("mysql") ++ // Oracle Database ++ DBSystemOracle = DBSystemKey.String("oracle") ++ // IBM DB2 ++ DBSystemDB2 = DBSystemKey.String("db2") ++ // PostgreSQL ++ DBSystemPostgreSQL = DBSystemKey.String("postgresql") ++ // Amazon Redshift ++ DBSystemRedshift = DBSystemKey.String("redshift") ++ // Apache Hive ++ DBSystemHive = DBSystemKey.String("hive") ++ // Cloudscape ++ DBSystemCloudscape = DBSystemKey.String("cloudscape") ++ // HyperSQL DataBase ++ DBSystemHSQLDB = DBSystemKey.String("hsqldb") ++ // Progress Database ++ DBSystemProgress = DBSystemKey.String("progress") ++ // SAP MaxDB ++ DBSystemMaxDB = DBSystemKey.String("maxdb") ++ // SAP HANA ++ DBSystemHanaDB = DBSystemKey.String("hanadb") ++ // Ingres ++ DBSystemIngres = DBSystemKey.String("ingres") ++ // FirstSQL ++ DBSystemFirstSQL = DBSystemKey.String("firstsql") ++ // EnterpriseDB ++ DBSystemEDB = DBSystemKey.String("edb") ++ // InterSystems Caché ++ DBSystemCache = DBSystemKey.String("cache") ++ // Adabas (Adaptable Database System) ++ DBSystemAdabas = DBSystemKey.String("adabas") ++ // Firebird ++ DBSystemFirebird = DBSystemKey.String("firebird") ++ // Apache Derby ++ DBSystemDerby = DBSystemKey.String("derby") ++ // FileMaker ++ DBSystemFilemaker = DBSystemKey.String("filemaker") ++ // Informix ++ DBSystemInformix = DBSystemKey.String("informix") ++ // InstantDB ++ DBSystemInstantDB = DBSystemKey.String("instantdb") ++ // InterBase ++ DBSystemInterbase = DBSystemKey.String("interbase") ++ // MariaDB ++ DBSystemMariaDB = DBSystemKey.String("mariadb") ++ // Netezza ++ DBSystemNetezza = DBSystemKey.String("netezza") ++ // Pervasive PSQL ++ DBSystemPervasive = DBSystemKey.String("pervasive") ++ // PointBase ++ DBSystemPointbase = DBSystemKey.String("pointbase") ++ // SQLite ++ DBSystemSqlite = DBSystemKey.String("sqlite") ++ // Sybase ++ DBSystemSybase = DBSystemKey.String("sybase") ++ // Teradata ++ DBSystemTeradata = DBSystemKey.String("teradata") ++ // Vertica ++ DBSystemVertica = DBSystemKey.String("vertica") ++ // H2 ++ DBSystemH2 = DBSystemKey.String("h2") ++ // ColdFusion IMQ ++ DBSystemColdfusion = DBSystemKey.String("coldfusion") ++ // Apache Cassandra ++ DBSystemCassandra = DBSystemKey.String("cassandra") ++ // Apache HBase ++ DBSystemHBase = DBSystemKey.String("hbase") ++ // MongoDB ++ DBSystemMongoDB = DBSystemKey.String("mongodb") ++ // Redis ++ DBSystemRedis = DBSystemKey.String("redis") ++ // Couchbase ++ DBSystemCouchbase = DBSystemKey.String("couchbase") ++ // CouchDB ++ DBSystemCouchDB = DBSystemKey.String("couchdb") ++ // Microsoft Azure Cosmos DB ++ DBSystemCosmosDB = DBSystemKey.String("cosmosdb") ++ // Amazon DynamoDB ++ DBSystemDynamoDB = DBSystemKey.String("dynamodb") ++ // Neo4j ++ DBSystemNeo4j = DBSystemKey.String("neo4j") ++ // Apache Geode ++ DBSystemGeode = DBSystemKey.String("geode") ++ // Elasticsearch ++ DBSystemElasticsearch = DBSystemKey.String("elasticsearch") ++ // Memcached ++ DBSystemMemcached = DBSystemKey.String("memcached") ++ // CockroachDB ++ DBSystemCockroachdb = 
DBSystemKey.String("cockroachdb") ++ // OpenSearch ++ DBSystemOpensearch = DBSystemKey.String("opensearch") ++ // ClickHouse ++ DBSystemClickhouse = DBSystemKey.String("clickhouse") ++ // Cloud Spanner ++ DBSystemSpanner = DBSystemKey.String("spanner") ++ // Trino ++ DBSystemTrino = DBSystemKey.String("trino") ++) ++ ++// DBConnectionString returns an attribute KeyValue conforming to the ++// "db.connection_string" semantic conventions. It represents the connection ++// string used to connect to the database. It is recommended to remove embedded ++// credentials. ++func DBConnectionString(val string) attribute.KeyValue { ++ return DBConnectionStringKey.String(val) ++} ++ ++// DBUser returns an attribute KeyValue conforming to the "db.user" semantic ++// conventions. It represents the username for accessing the database. ++func DBUser(val string) attribute.KeyValue { ++ return DBUserKey.String(val) ++} ++ ++// DBJDBCDriverClassname returns an attribute KeyValue conforming to the ++// "db.jdbc.driver_classname" semantic conventions. It represents the ++// fully-qualified class name of the [Java Database Connectivity ++// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver ++// used to connect. ++func DBJDBCDriverClassname(val string) attribute.KeyValue { ++ return DBJDBCDriverClassnameKey.String(val) ++} ++ ++// DBName returns an attribute KeyValue conforming to the "db.name" semantic ++// conventions. It represents the this attribute is used to report the name of ++// the database being accessed. For commands that switch the database, this ++// should be set to the target database (even if the command fails). ++func DBName(val string) attribute.KeyValue { ++ return DBNameKey.String(val) ++} ++ ++// DBStatement returns an attribute KeyValue conforming to the ++// "db.statement" semantic conventions. It represents the database statement ++// being executed. ++func DBStatement(val string) attribute.KeyValue { ++ return DBStatementKey.String(val) ++} ++ ++// DBOperation returns an attribute KeyValue conforming to the ++// "db.operation" semantic conventions. It represents the name of the operation ++// being executed, e.g. the [MongoDB command ++// name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++// such as `findAndModify`, or the SQL keyword. ++func DBOperation(val string) attribute.KeyValue { ++ return DBOperationKey.String(val) ++} ++ ++// Connection-level attributes for Microsoft SQL Server ++const ( ++ // DBMSSQLInstanceNameKey is the attribute Key conforming to the ++ // "db.mssql.instance_name" semantic conventions. It represents the ++ // Microsoft SQL Server [instance ++ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++ // connecting to. This name is used to determine the port of a named ++ // instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MSSQLSERVER' ++ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer ++ // required (but still recommended if non-standard). ++ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ++) ++ ++// DBMSSQLInstanceName returns an attribute KeyValue conforming to the ++// "db.mssql.instance_name" semantic conventions. It represents the Microsoft ++// SQL Server [instance ++// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++// connecting to. 
This name is used to determine the port of a named instance. ++func DBMSSQLInstanceName(val string) attribute.KeyValue { ++ return DBMSSQLInstanceNameKey.String(val) ++} ++ ++// Call-level attributes for Cassandra ++const ( ++ // DBCassandraPageSizeKey is the attribute Key conforming to the ++ // "db.cassandra.page_size" semantic conventions. It represents the fetch ++ // size used for paging, i.e. how many rows will be returned at once. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 5000 ++ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") ++ ++ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the ++ // "db.cassandra.consistency_level" semantic conventions. It represents the ++ // consistency level of the query. Based on consistency values from ++ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") ++ ++ // DBCassandraTableKey is the attribute Key conforming to the ++ // "db.cassandra.table" semantic conventions. It represents the name of the ++ // primary table that the operation is acting upon, including the keyspace ++ // name (if applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'mytable' ++ // Note: This mirrors the db.sql.table attribute but references cassandra ++ // rather than sql. It is not recommended to attempt any client-side ++ // parsing of `db.statement` just to get this property, but it should be ++ // set if it is provided by the library being instrumented. If the ++ // operation is acting upon an anonymous table, or more than one table, ++ // this value MUST NOT be set. ++ DBCassandraTableKey = attribute.Key("db.cassandra.table") ++ ++ // DBCassandraIdempotenceKey is the attribute Key conforming to the ++ // "db.cassandra.idempotence" semantic conventions. It represents the ++ // whether or not the query is idempotent. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") ++ ++ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming ++ // to the "db.cassandra.speculative_execution_count" semantic conventions. ++ // It represents the number of times a query was speculatively executed. ++ // Not set or `0` if the query was not executed speculatively. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") ++ ++ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID ++ // of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' ++ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") ++ ++ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.dc" semantic conventions. It represents the ++ // data center of the coordinating node for a query. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-west-2' ++ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ++) ++ ++var ( ++ // all ++ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") ++ // each_quorum ++ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") ++ // quorum ++ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") ++ // local_quorum ++ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") ++ // one ++ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") ++ // two ++ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") ++ // three ++ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") ++ // local_one ++ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") ++ // any ++ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") ++ // serial ++ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") ++ // local_serial ++ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ++) ++ ++// DBCassandraPageSize returns an attribute KeyValue conforming to the ++// "db.cassandra.page_size" semantic conventions. It represents the fetch size ++// used for paging, i.e. how many rows will be returned at once. ++func DBCassandraPageSize(val int) attribute.KeyValue { ++ return DBCassandraPageSizeKey.Int(val) ++} ++ ++// DBCassandraTable returns an attribute KeyValue conforming to the ++// "db.cassandra.table" semantic conventions. It represents the name of the ++// primary table that the operation is acting upon, including the keyspace name ++// (if applicable). ++func DBCassandraTable(val string) attribute.KeyValue { ++ return DBCassandraTableKey.String(val) ++} ++ ++// DBCassandraIdempotence returns an attribute KeyValue conforming to the ++// "db.cassandra.idempotence" semantic conventions. It represents the whether ++// or not the query is idempotent. ++func DBCassandraIdempotence(val bool) attribute.KeyValue { ++ return DBCassandraIdempotenceKey.Bool(val) ++} ++ ++// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue ++// conforming to the "db.cassandra.speculative_execution_count" semantic ++// conventions. It represents the number of times a query was speculatively ++// executed. Not set or `0` if the query was not executed speculatively. ++func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { ++ return DBCassandraSpeculativeExecutionCountKey.Int(val) ++} ++ ++// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of ++// the coordinating node for a query. ++func DBCassandraCoordinatorID(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorIDKey.String(val) ++} ++ ++// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.dc" semantic conventions. It represents the data ++// center of the coordinating node for a query. 
++func DBCassandraCoordinatorDC(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorDCKey.String(val) ++} ++ ++// Call-level attributes for Redis ++const ( ++ // DBRedisDBIndexKey is the attribute Key conforming to the ++ // "db.redis.database_index" semantic conventions. It represents the index ++ // of the database being accessed as used in the [`SELECT` ++ // command](https://redis.io/commands/select), provided as an integer. To ++ // be used instead of the generic `db.name` attribute. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // database (`0`).) ++ // Stability: stable ++ // Examples: 0, 1, 15 ++ DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ++) ++ ++// DBRedisDBIndex returns an attribute KeyValue conforming to the ++// "db.redis.database_index" semantic conventions. It represents the index of ++// the database being accessed as used in the [`SELECT` ++// command](https://redis.io/commands/select), provided as an integer. To be ++// used instead of the generic `db.name` attribute. ++func DBRedisDBIndex(val int) attribute.KeyValue { ++ return DBRedisDBIndexKey.Int(val) ++} ++ ++// Call-level attributes for MongoDB ++const ( ++ // DBMongoDBCollectionKey is the attribute Key conforming to the ++ // "db.mongodb.collection" semantic conventions. It represents the ++ // collection being accessed within the database stated in `db.name`. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'customers', 'products' ++ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ++) ++ ++// DBMongoDBCollection returns an attribute KeyValue conforming to the ++// "db.mongodb.collection" semantic conventions. It represents the collection ++// being accessed within the database stated in `db.name`. ++func DBMongoDBCollection(val string) attribute.KeyValue { ++ return DBMongoDBCollectionKey.String(val) ++} ++ ++// Call-level attributes for SQL databases ++const ( ++ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" ++ // semantic conventions. It represents the name of the primary table that ++ // the operation is acting upon, including the database name (if ++ // applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'public.users', 'customers' ++ // Note: It is not recommended to attempt any client-side parsing of ++ // `db.statement` just to get this property, but it should be set if it is ++ // provided by the library being instrumented. If the operation is acting ++ // upon an anonymous table, or more than one table, this value MUST NOT be ++ // set. ++ DBSQLTableKey = attribute.Key("db.sql.table") ++) ++ ++// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" ++// semantic conventions. It represents the name of the primary table that the ++// operation is acting upon, including the database name (if applicable). ++func DBSQLTable(val string) attribute.KeyValue { ++ return DBSQLTableKey.String(val) ++} ++ ++// Call-level attributes for Cosmos DB. ++const ( ++ // DBCosmosDBClientIDKey is the attribute Key conforming to the ++ // "db.cosmosdb.client_id" semantic conventions. It represents the unique ++ // Cosmos client instance id. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' ++ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") ++ ++ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the ++ // "db.cosmosdb.operation_type" semantic conventions. It represents the ++ // cosmosDB Operation Type. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (when performing one of the ++ // operations in this list) ++ // Stability: stable ++ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") ++ ++ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the ++ // "db.cosmosdb.connection_mode" semantic conventions. It represents the ++ // cosmos client connection mode. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as ++ // default)) ++ // Stability: stable ++ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") ++ ++ // DBCosmosDBContainerKey is the attribute Key conforming to the ++ // "db.cosmosdb.container" semantic conventions. It represents the cosmos ++ // DB container name. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (if available) ++ // Stability: stable ++ // Examples: 'anystring' ++ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") ++ ++ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the ++ // "db.cosmosdb.request_content_length" semantic conventions. It represents ++ // the request payload size in bytes ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") ++ ++ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the ++ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos ++ // DB status code. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (if response was received) ++ // Stability: stable ++ // Examples: 200, 201 ++ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") ++ ++ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the ++ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the ++ // cosmos DB sub status code. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (when response was received and ++ // contained sub-code.) ++ // Stability: stable ++ // Examples: 1000, 1002 ++ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") ++ ++ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the ++ // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU ++ // consumed for that operation ++ // ++ // Type: double ++ // RequirementLevel: ConditionallyRequired (when available) ++ // Stability: stable ++ // Examples: 46.18, 1.0 ++ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") ++) ++ ++var ( ++ // invalid ++ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") ++ // create ++ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") ++ // patch ++ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") ++ // read ++ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") ++ // read_feed ++ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") ++ // delete ++ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") ++ // replace ++ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") ++ // execute ++ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") ++ // query ++ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") ++ // head ++ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") ++ // head_feed ++ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") ++ // upsert ++ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") ++ // batch ++ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") ++ // query_plan ++ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") ++ // execute_javascript ++ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") ++) ++ ++var ( ++ // Gateway (HTTP) connections mode ++ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") ++ // Direct connection ++ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") ++) ++ ++// DBCosmosDBClientID returns an attribute KeyValue conforming to the ++// "db.cosmosdb.client_id" semantic conventions. It represents the unique ++// Cosmos client instance id. ++func DBCosmosDBClientID(val string) attribute.KeyValue { ++ return DBCosmosDBClientIDKey.String(val) ++} ++ ++// DBCosmosDBContainer returns an attribute KeyValue conforming to the ++// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB ++// container name. ++func DBCosmosDBContainer(val string) attribute.KeyValue { ++ return DBCosmosDBContainerKey.String(val) ++} ++ ++// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming ++// to the "db.cosmosdb.request_content_length" semantic conventions. It ++// represents the request payload size in bytes ++func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { ++ return DBCosmosDBRequestContentLengthKey.Int(val) ++} ++ ++// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the ++// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB ++// status code. ++func DBCosmosDBStatusCode(val int) attribute.KeyValue { ++ return DBCosmosDBStatusCodeKey.Int(val) ++} ++ ++// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the ++// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos ++// DB sub status code. 
++func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { ++ return DBCosmosDBSubStatusCodeKey.Int(val) ++} ++ ++// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the ++// "db.cosmosdb.request_charge" semantic conventions. It represents the rU ++// consumed for that operation ++func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { ++ return DBCosmosDBRequestChargeKey.Float64(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's ++// concepts. ++const ( ++ // OTelStatusCodeKey is the attribute Key conforming to the ++ // "otel.status_code" semantic conventions. It represents the name of the ++ // code, either "OK" or "ERROR". MUST NOT be set if the status code is ++ // UNSET. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ OTelStatusCodeKey = attribute.Key("otel.status_code") ++ ++ // OTelStatusDescriptionKey is the attribute Key conforming to the ++ // "otel.status_description" semantic conventions. It represents the ++ // description of the Status if it has a value, otherwise not set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'resource not found' ++ OTelStatusDescriptionKey = attribute.Key("otel.status_description") ++) ++ ++var ( ++ // The operation has been validated by an Application developer or Operator to have completed successfully ++ OTelStatusCodeOk = OTelStatusCodeKey.String("OK") ++ // The operation contains an error ++ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ++) ++ ++// OTelStatusDescription returns an attribute KeyValue conforming to the ++// "otel.status_description" semantic conventions. It represents the ++// description of the Status if it has a value, otherwise not set. ++func OTelStatusDescription(val string) attribute.KeyValue { ++ return OTelStatusDescriptionKey.String(val) ++} ++ ++// This semantic convention describes an instance of a function that runs ++// without provisioning or managing of servers (also known as serverless ++// functions or Function as a Service (FaaS)) with spans. ++const ( ++ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" ++ // semantic conventions. It represents the type of the trigger which caused ++ // this function invocation. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: For the server/consumer span on the incoming side, ++ // `faas.trigger` MUST be set. ++ // ++ // Clients invoking FaaS instances usually cannot set `faas.trigger`, ++ // since they would typically need to look in the payload to determine ++ // the event type. If clients set it, it should be the same as the ++ // trigger that corresponding incoming would have (i.e., this has ++ // nothing to do with the underlying transport used to make the API ++ // call to invoke the lambda, which is often HTTP). ++ FaaSTriggerKey = attribute.Key("faas.trigger") ++ ++ // FaaSInvocationIDKey is the attribute Key conforming to the ++ // "faas.invocation_id" semantic conventions. It represents the invocation ++ // ID of the current function invocation. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' ++ FaaSInvocationIDKey = attribute.Key("faas.invocation_id") ++) ++ ++var ( ++ // A response to some data source operation such as a database or filesystem read/write ++ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") ++ // To provide an answer to an inbound HTTP request ++ FaaSTriggerHTTP = FaaSTriggerKey.String("http") ++ // A function is set to be executed when messages are sent to a messaging system ++ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") ++ // A function is scheduled to be executed regularly ++ FaaSTriggerTimer = FaaSTriggerKey.String("timer") ++ // If none of the others apply ++ FaaSTriggerOther = FaaSTriggerKey.String("other") ++) ++ ++// FaaSInvocationID returns an attribute KeyValue conforming to the ++// "faas.invocation_id" semantic conventions. It represents the invocation ID ++// of the current function invocation. ++func FaaSInvocationID(val string) attribute.KeyValue { ++ return FaaSInvocationIDKey.String(val) ++} ++ ++// Semantic Convention for FaaS triggered as a response to some data source ++// operation such as a database or filesystem read/write. ++const ( ++ // FaaSDocumentCollectionKey is the attribute Key conforming to the ++ // "faas.document.collection" semantic conventions. It represents the name ++ // of the source on which the triggering operation was performed. For ++ // example, in Cloud Storage or S3 corresponds to the bucket name, and in ++ // Cosmos DB to the database name. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myBucketName', 'myDBName' ++ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") ++ ++ // FaaSDocumentOperationKey is the attribute Key conforming to the ++ // "faas.document.operation" semantic conventions. It represents the ++ // describes the type of the operation that was performed on the data. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ FaaSDocumentOperationKey = attribute.Key("faas.document.operation") ++ ++ // FaaSDocumentTimeKey is the attribute Key conforming to the ++ // "faas.document.time" semantic conventions. It represents a string ++ // containing the time when the data was accessed in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSDocumentTimeKey = attribute.Key("faas.document.time") ++ ++ // FaaSDocumentNameKey is the attribute Key conforming to the ++ // "faas.document.name" semantic conventions. It represents the document ++ // name/table subjected to the operation. For example, in Cloud Storage or ++ // S3 is the name of the file, and in Cosmos DB the table name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myFile.txt', 'myTableName' ++ FaaSDocumentNameKey = attribute.Key("faas.document.name") ++) ++ ++var ( ++ // When a new object is created ++ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") ++ // When an object is modified ++ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") ++ // When an object is deleted ++ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ++) ++ ++// FaaSDocumentCollection returns an attribute KeyValue conforming to the ++// "faas.document.collection" semantic conventions. It represents the name of ++// the source on which the triggering operation was performed. For example, in ++// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the ++// database name. ++func FaaSDocumentCollection(val string) attribute.KeyValue { ++ return FaaSDocumentCollectionKey.String(val) ++} ++ ++// FaaSDocumentTime returns an attribute KeyValue conforming to the ++// "faas.document.time" semantic conventions. It represents a string containing ++// the time when the data was accessed in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSDocumentTime(val string) attribute.KeyValue { ++ return FaaSDocumentTimeKey.String(val) ++} ++ ++// FaaSDocumentName returns an attribute KeyValue conforming to the ++// "faas.document.name" semantic conventions. It represents the document ++// name/table subjected to the operation. For example, in Cloud Storage or S3 ++// is the name of the file, and in Cosmos DB the table name. ++func FaaSDocumentName(val string) attribute.KeyValue { ++ return FaaSDocumentNameKey.String(val) ++} ++ ++// Semantic Convention for FaaS scheduled to be executed regularly. ++const ( ++ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic ++ // conventions. It represents a string containing the function invocation ++ // time in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSTimeKey = attribute.Key("faas.time") ++ ++ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic ++ // conventions. It represents a string containing the schedule period as ++ // [Cron ++ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0/5 * * * ? *' ++ FaaSCronKey = attribute.Key("faas.cron") ++) ++ ++// FaaSTime returns an attribute KeyValue conforming to the "faas.time" ++// semantic conventions. It represents a string containing the function ++// invocation time in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSTime(val string) attribute.KeyValue { ++ return FaaSTimeKey.String(val) ++} ++ ++// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" ++// semantic conventions. It represents a string containing the schedule period ++// as [Cron ++// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). 
++func FaaSCron(val string) attribute.KeyValue { ++ return FaaSCronKey.String(val) ++} ++ ++// Contains additional attributes for incoming FaaS spans. ++const ( ++ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" ++ // semantic conventions. It represents a boolean that is true if the ++ // serverless function is executed for the first time (aka cold-start). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ FaaSColdstartKey = attribute.Key("faas.coldstart") ++) ++ ++// FaaSColdstart returns an attribute KeyValue conforming to the ++// "faas.coldstart" semantic conventions. It represents a boolean that is true ++// if the serverless function is executed for the first time (aka cold-start). ++func FaaSColdstart(val bool) attribute.KeyValue { ++ return FaaSColdstartKey.Bool(val) ++} ++ ++// Contains additional attributes for outgoing FaaS spans. ++const ( ++ // FaaSInvokedNameKey is the attribute Key conforming to the ++ // "faas.invoked_name" semantic conventions. It represents the name of the ++ // invoked function. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function' ++ // Note: SHOULD be equal to the `faas.name` resource attribute of the ++ // invoked function. ++ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") ++ ++ // FaaSInvokedProviderKey is the attribute Key conforming to the ++ // "faas.invoked_provider" semantic conventions. It represents the cloud ++ // provider of the invoked function. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the ++ // invoked function. ++ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") ++ ++ // FaaSInvokedRegionKey is the attribute Key conforming to the ++ // "faas.invoked_region" semantic conventions. It represents the cloud ++ // region of the invoked function. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (For some cloud providers, like ++ // AWS or GCP, the region in which a function is hosted is essential to ++ // uniquely identify the function and also part of its endpoint. Since it's ++ // part of the endpoint being called, the region is always known to ++ // clients. In these cases, `faas.invoked_region` MUST be set accordingly. ++ // If the region is unknown to the client or not required for identifying ++ // the invoked function, setting `faas.invoked_region` is optional.) ++ // Stability: stable ++ // Examples: 'eu-central-1' ++ // Note: SHOULD be equal to the `cloud.region` resource attribute of the ++ // invoked function. ++ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ++) ++ ++var ( ++ // Alibaba Cloud ++ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") ++ // Microsoft Azure ++ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") ++ // Google Cloud Platform ++ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ++ // Tencent Cloud ++ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ++) ++ ++// FaaSInvokedName returns an attribute KeyValue conforming to the ++// "faas.invoked_name" semantic conventions. It represents the name of the ++// invoked function. 
++func FaaSInvokedName(val string) attribute.KeyValue { ++ return FaaSInvokedNameKey.String(val) ++} ++ ++// FaaSInvokedRegion returns an attribute KeyValue conforming to the ++// "faas.invoked_region" semantic conventions. It represents the cloud region ++// of the invoked function. ++func FaaSInvokedRegion(val string) attribute.KeyValue { ++ return FaaSInvokedRegionKey.String(val) ++} ++ ++// Operations that access some remote service. ++const ( ++ // PeerServiceKey is the attribute Key conforming to the "peer.service" ++ // semantic conventions. It represents the ++ // [`service.name`](/docs/resource/README.md#service) of the remote ++ // service. SHOULD be equal to the actual `service.name` resource attribute ++ // of the remote service if any. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'AuthTokenCache' ++ PeerServiceKey = attribute.Key("peer.service") ++) ++ ++// PeerService returns an attribute KeyValue conforming to the ++// "peer.service" semantic conventions. It represents the ++// [`service.name`](/docs/resource/README.md#service) of the remote service. ++// SHOULD be equal to the actual `service.name` resource attribute of the ++// remote service if any. ++func PeerService(val string) attribute.KeyValue { ++ return PeerServiceKey.String(val) ++} ++ ++// These attributes may be used for any operation with an authenticated and/or ++// authorized enduser. ++const ( ++ // EnduserIDKey is the attribute Key conforming to the "enduser.id" ++ // semantic conventions. It represents the username or client_id extracted ++ // from the access token or ++ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header ++ // in the inbound request from outside the system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'username' ++ EnduserIDKey = attribute.Key("enduser.id") ++ ++ // EnduserRoleKey is the attribute Key conforming to the "enduser.role" ++ // semantic conventions. It represents the actual/assumed role the client ++ // is making the request under extracted from token or application security ++ // context. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'admin' ++ EnduserRoleKey = attribute.Key("enduser.role") ++ ++ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" ++ // semantic conventions. It represents the scopes or granted authorities ++ // the client currently possesses extracted from token or application ++ // security context. The value would come from the scope associated with an ++ // [OAuth 2.0 Access ++ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++ // value in a [SAML 2.0 ++ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'read:message, write:files' ++ EnduserScopeKey = attribute.Key("enduser.scope") ++) ++ ++// EnduserID returns an attribute KeyValue conforming to the "enduser.id" ++// semantic conventions. It represents the username or client_id extracted from ++// the access token or ++// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in ++// the inbound request from outside the system. 
++func EnduserID(val string) attribute.KeyValue { ++ return EnduserIDKey.String(val) ++} ++ ++// EnduserRole returns an attribute KeyValue conforming to the ++// "enduser.role" semantic conventions. It represents the actual/assumed role ++// the client is making the request under extracted from token or application ++// security context. ++func EnduserRole(val string) attribute.KeyValue { ++ return EnduserRoleKey.String(val) ++} ++ ++// EnduserScope returns an attribute KeyValue conforming to the ++// "enduser.scope" semantic conventions. It represents the scopes or granted ++// authorities the client currently possesses extracted from token or ++// application security context. The value would come from the scope associated ++// with an [OAuth 2.0 Access ++// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++// value in a [SAML 2.0 ++// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++func EnduserScope(val string) attribute.KeyValue { ++ return EnduserScopeKey.String(val) ++} ++ ++// These attributes may be used for any operation to store information about a ++// thread that started a span. ++const ( ++ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic ++ // conventions. It represents the current "managed" thread ID (as opposed ++ // to OS thread ID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ ThreadIDKey = attribute.Key("thread.id") ++ ++ // ThreadNameKey is the attribute Key conforming to the "thread.name" ++ // semantic conventions. It represents the current thread name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'main' ++ ThreadNameKey = attribute.Key("thread.name") ++) ++ ++// ThreadID returns an attribute KeyValue conforming to the "thread.id" ++// semantic conventions. It represents the current "managed" thread ID (as ++// opposed to OS thread ID). ++func ThreadID(val int) attribute.KeyValue { ++ return ThreadIDKey.Int(val) ++} ++ ++// ThreadName returns an attribute KeyValue conforming to the "thread.name" ++// semantic conventions. It represents the current thread name. ++func ThreadName(val string) attribute.KeyValue { ++ return ThreadNameKey.String(val) ++} ++ ++// These attributes allow to report this unit of code and therefore to provide ++// more context about the span. ++const ( ++ // CodeFunctionKey is the attribute Key conforming to the "code.function" ++ // semantic conventions. It represents the method or function name, or ++ // equivalent (usually rightmost part of the code unit's name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'serveRequest' ++ CodeFunctionKey = attribute.Key("code.function") ++ ++ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" ++ // semantic conventions. It represents the "namespace" within which ++ // `code.function` is defined. Usually the qualified class or module name, ++ // such that `code.namespace` + some separator + `code.function` form a ++ // unique identifier for the code unit. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.example.MyHTTPService' ++ CodeNamespaceKey = attribute.Key("code.namespace") ++ ++ // CodeFilepathKey is the attribute Key conforming to the "code.filepath" ++ // semantic conventions. 
It represents the source code file name that ++ // identifies the code unit as uniquely as possible (preferably an absolute ++ // file path). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/usr/local/MyApplication/content_root/app/index.php' ++ CodeFilepathKey = attribute.Key("code.filepath") ++ ++ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" ++ // semantic conventions. It represents the line number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ CodeLineNumberKey = attribute.Key("code.lineno") ++ ++ // CodeColumnKey is the attribute Key conforming to the "code.column" ++ // semantic conventions. It represents the column number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 16 ++ CodeColumnKey = attribute.Key("code.column") ++) ++ ++// CodeFunction returns an attribute KeyValue conforming to the ++// "code.function" semantic conventions. It represents the method or function ++// name, or equivalent (usually rightmost part of the code unit's name). ++func CodeFunction(val string) attribute.KeyValue { ++ return CodeFunctionKey.String(val) ++} ++ ++// CodeNamespace returns an attribute KeyValue conforming to the ++// "code.namespace" semantic conventions. It represents the "namespace" within ++// which `code.function` is defined. Usually the qualified class or module ++// name, such that `code.namespace` + some separator + `code.function` form a ++// unique identifier for the code unit. ++func CodeNamespace(val string) attribute.KeyValue { ++ return CodeNamespaceKey.String(val) ++} ++ ++// CodeFilepath returns an attribute KeyValue conforming to the ++// "code.filepath" semantic conventions. It represents the source code file ++// name that identifies the code unit as uniquely as possible (preferably an ++// absolute file path). ++func CodeFilepath(val string) attribute.KeyValue { ++ return CodeFilepathKey.String(val) ++} ++ ++// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" ++// semantic conventions. It represents the line number in `code.filepath` best ++// representing the operation. It SHOULD point within the code unit named in ++// `code.function`. ++func CodeLineNumber(val int) attribute.KeyValue { ++ return CodeLineNumberKey.Int(val) ++} ++ ++// CodeColumn returns an attribute KeyValue conforming to the "code.column" ++// semantic conventions. It represents the column number in `code.filepath` ++// best representing the operation. It SHOULD point within the code unit named ++// in `code.function`. ++func CodeColumn(val int) attribute.KeyValue { ++ return CodeColumnKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Client ++const ( ++ // HTTPResendCountKey is the attribute Key conforming to the ++ // "http.resend_count" semantic conventions. It represents the ordinal ++ // number of request resending attempt (for any reason, including ++ // redirects). ++ // ++ // Type: int ++ // RequirementLevel: Recommended (if and only if request was retried.) 
++ // Stability: stable ++ // Examples: 3 ++ // Note: The resend count SHOULD be updated each time an HTTP request gets ++ // resent by the client, regardless of what was the cause of the resending ++ // (e.g. redirection, authorization failure, 503 Server Unavailable, ++ // network issues, or any other). ++ HTTPResendCountKey = attribute.Key("http.resend_count") ++) ++ ++// HTTPResendCount returns an attribute KeyValue conforming to the ++// "http.resend_count" semantic conventions. It represents the ordinal number ++// of request resending attempt (for any reason, including redirects). ++func HTTPResendCount(val int) attribute.KeyValue { ++ return HTTPResendCountKey.Int(val) ++} ++ ++// The `aws` conventions apply to operations using the AWS SDK. They map ++// request or response parameters in AWS SDK API calls to attributes on a Span. ++// The conventions have been collected over time based on feedback from AWS ++// users of tracing and will continue to evolve as new interesting conventions ++// are found. ++// Some descriptions are also provided for populating general OpenTelemetry ++// semantic conventions based on these APIs. ++const ( ++ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" ++ // semantic conventions. It represents the AWS request ID as returned in ++ // the response headers `x-amz-request-id` or `x-amz-requestid`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' ++ AWSRequestIDKey = attribute.Key("aws.request_id") ++) ++ ++// AWSRequestID returns an attribute KeyValue conforming to the ++// "aws.request_id" semantic conventions. It represents the AWS request ID as ++// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. ++func AWSRequestID(val string) attribute.KeyValue { ++ return AWSRequestIDKey.String(val) ++} ++ ++// Attributes that exist for multiple DynamoDB request types. ++const ( ++ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_names" semantic conventions. It represents the keys ++ // in the `RequestItems` object field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'Cats' ++ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") ++ ++ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the ++ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++ // JSON-serialized value of each item in the `ConsumedCapacity` response ++ // field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { ++ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number }, "TableName": "string", ++ // "WriteCapacityUnits": number }' ++ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") ++ ++ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to ++ // the "aws.dynamodb.item_collection_metrics" semantic conventions. 
It ++ // represents the JSON-serialized value of the `ItemCollectionMetrics` ++ // response field. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": ++ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { ++ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], ++ // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, ++ // "SizeEstimateRangeGB": [ number ] } ] }' ++ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") ++ ++ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to ++ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It ++ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` ++ // request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") ++ ++ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming ++ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. ++ // It represents the value of the ++ // `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") ++ ++ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the ++ // "aws.dynamodb.consistent_read" semantic conventions. It represents the ++ // value of the `ConsistentRead` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") ++ ++ // AWSDynamoDBProjectionKey is the attribute Key conforming to the ++ // "aws.dynamodb.projection" semantic conventions. It represents the value ++ // of the `ProjectionExpression` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, ++ // RelatedItems, ProductReviews' ++ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") ++ ++ // AWSDynamoDBLimitKey is the attribute Key conforming to the ++ // "aws.dynamodb.limit" semantic conventions. It represents the value of ++ // the `Limit` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") ++ ++ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the ++ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++ // value of the `AttributesToGet` request parameter. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'lives', 'id' ++ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") ++ ++ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the ++ // "aws.dynamodb.index_name" semantic conventions. It represents the value ++ // of the `IndexName` request parameter. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'name_to_group' ++ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") ++ ++ // AWSDynamoDBSelectKey is the attribute Key conforming to the ++ // "aws.dynamodb.select" semantic conventions. It represents the value of ++ // the `Select` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ALL_ATTRIBUTES', 'COUNT' ++ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ++) ++ ++// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_names" semantic conventions. It represents the keys in ++// the `RequestItems` object field. ++func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { ++ return AWSDynamoDBTableNamesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to ++// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++// JSON-serialized value of each item in the `ConsumedCapacity` response field. ++func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { ++ return AWSDynamoDBConsumedCapacityKey.StringSlice(val) ++} ++ ++// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming ++// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++// represents the JSON-serialized value of the `ItemCollectionMetrics` response ++// field. ++func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { ++ return AWSDynamoDBItemCollectionMetricsKey.String(val) ++} ++ ++// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.ReadCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the ++// "aws.dynamodb.consistent_read" semantic conventions. It represents the value ++// of the `ConsistentRead` request parameter. ++func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { ++ return AWSDynamoDBConsistentReadKey.Bool(val) ++} ++ ++// AWSDynamoDBProjection returns an attribute KeyValue conforming to the ++// "aws.dynamodb.projection" semantic conventions. It represents the value of ++// the `ProjectionExpression` request parameter. ++func AWSDynamoDBProjection(val string) attribute.KeyValue { ++ return AWSDynamoDBProjectionKey.String(val) ++} ++ ++// AWSDynamoDBLimit returns an attribute KeyValue conforming to the ++// "aws.dynamodb.limit" semantic conventions. It represents the value of the ++// `Limit` request parameter. ++func AWSDynamoDBLimit(val int) attribute.KeyValue { ++ return AWSDynamoDBLimitKey.Int(val) ++} ++ ++// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to ++// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the ++// value of the `AttributesToGet` request parameter. ++func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributesToGetKey.StringSlice(val) ++} ++ ++// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the ++// "aws.dynamodb.index_name" semantic conventions. It represents the value of ++// the `IndexName` request parameter. ++func AWSDynamoDBIndexName(val string) attribute.KeyValue { ++ return AWSDynamoDBIndexNameKey.String(val) ++} ++ ++// AWSDynamoDBSelect returns an attribute KeyValue conforming to the ++// "aws.dynamodb.select" semantic conventions. It represents the value of the ++// `Select` request parameter. ++func AWSDynamoDBSelect(val string) attribute.KeyValue { ++ return AWSDynamoDBSelectKey.String(val) ++} ++ ++// DynamoDB.CreateTable ++const ( ++ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `GlobalSecondaryIndexes` request field ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": ++ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ ++ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { ++ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") ++ ++ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `LocalSecondaryIndexes` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexARN": "string", "IndexName": "string", ++ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' ++ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ++) ++ ++// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_indexes" semantic ++// conventions. It represents the JSON-serialized value of each item of the ++// `GlobalSecondaryIndexes` request field ++func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming ++// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++// represents the JSON-serialized value of each item of the ++// `LocalSecondaryIndexes` request field. ++func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// DynamoDB.ListTables ++const ( ++ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the ++ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents ++ // the value of the `ExclusiveStartTableName` request parameter. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'CatsTable' ++ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") ++ ++ // AWSDynamoDBTableCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_count" semantic conventions. It represents the the ++ // number of items in the `TableNames` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 20 ++ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ++) ++ ++// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming ++// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It ++// represents the value of the `ExclusiveStartTableName` request parameter. ++func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { ++ return AWSDynamoDBExclusiveStartTableKey.String(val) ++} ++ ++// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_count" semantic conventions. It represents the the ++// number of items in the `TableNames` response parameter. ++func AWSDynamoDBTableCount(val int) attribute.KeyValue { ++ return AWSDynamoDBTableCountKey.Int(val) ++} ++ ++// DynamoDB.Query ++const ( ++ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the ++ // "aws.dynamodb.scan_forward" semantic conventions. It represents the ++ // value of the `ScanIndexForward` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ++) ++ ++// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of ++// the `ScanIndexForward` request parameter. ++func AWSDynamoDBScanForward(val bool) attribute.KeyValue { ++ return AWSDynamoDBScanForwardKey.Bool(val) ++} ++ ++// DynamoDB.Scan ++const ( ++ // AWSDynamoDBSegmentKey is the attribute Key conforming to the ++ // "aws.dynamodb.segment" semantic conventions. It represents the value of ++ // the `Segment` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") ++ ++ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the ++ // "aws.dynamodb.total_segments" semantic conventions. It represents the ++ // value of the `TotalSegments` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 100 ++ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") ++ ++ // AWSDynamoDBCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.count" semantic conventions. It represents the value of ++ // the `Count` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") ++ ++ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.scanned_count" semantic conventions. It represents the ++ // value of the `ScannedCount` response parameter. 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 50 ++ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ++) ++ ++// AWSDynamoDBSegment returns an attribute KeyValue conforming to the ++// "aws.dynamodb.segment" semantic conventions. It represents the value of the ++// `Segment` request parameter. ++func AWSDynamoDBSegment(val int) attribute.KeyValue { ++ return AWSDynamoDBSegmentKey.Int(val) ++} ++ ++// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the ++// "aws.dynamodb.total_segments" semantic conventions. It represents the value ++// of the `TotalSegments` request parameter. ++func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { ++ return AWSDynamoDBTotalSegmentsKey.Int(val) ++} ++ ++// AWSDynamoDBCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.count" semantic conventions. It represents the value of the ++// `Count` response parameter. ++func AWSDynamoDBCount(val int) attribute.KeyValue { ++ return AWSDynamoDBCountKey.Int(val) ++} ++ ++// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scanned_count" semantic conventions. It represents the value ++// of the `ScannedCount` response parameter. ++func AWSDynamoDBScannedCount(val int) attribute.KeyValue { ++ return AWSDynamoDBScannedCountKey.Int(val) ++} ++ ++// DynamoDB.UpdateTable ++const ( ++ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to ++ // the "aws.dynamodb.attribute_definitions" semantic conventions. It ++ // represents the JSON-serialized value of each item in the ++ // `AttributeDefinitions` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' ++ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") ++ ++ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key ++ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++ // conventions. It represents the JSON-serialized value of each item in the ++ // the `GlobalSecondaryIndexUpdates` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, ++ // "ProvisionedThroughput": { "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ++) ++ ++// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming ++// to the "aws.dynamodb.attribute_definitions" semantic conventions. It ++// represents the JSON-serialized value of each item in the ++// `AttributeDefinitions` request field. ++func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) ++} ++ ++// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++// conventions. It represents the JSON-serialized value of each item in the the ++// `GlobalSecondaryIndexUpdates` request field. 
++func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) ++} ++ ++// Attributes that exist for S3 request types. ++const ( ++ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" ++ // semantic conventions. It represents the S3 bucket name the request ++ // refers to. Corresponds to the `--bucket` parameter of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // operations. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'some-bucket-name' ++ // Note: The `bucket` attribute is applicable to all S3 operations that ++ // reference a bucket, i.e. that require the bucket name as a mandatory ++ // parameter. ++ // This applies to almost all S3 operations except `list-buckets`. ++ AWSS3BucketKey = attribute.Key("aws.s3.bucket") ++ ++ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic ++ // conventions. It represents the S3 object key the request refers to. ++ // Corresponds to the `--key` parameter of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // operations. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'someFile.yml' ++ // Note: The `key` attribute is applicable to all object-related S3 ++ // operations, i.e. that require the object key as a mandatory parameter. ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) ++ // - ++ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) ++ // - ++ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) ++ // - ++ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) ++ // - ++ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) ++ // - ++ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) ++ // - ++ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) ++ // - ++ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) ++ // - ++ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) ++ // - ++ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) ++ // - ++ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) ++ // - ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3KeyKey = attribute.Key("aws.s3.key") ++ ++ // AWSS3CopySourceKey is the attribute Key conforming to the ++ // "aws.s3.copy_source" semantic conventions. It represents the source ++ // object (in the form `bucket`/`key`) for the copy operation. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'someFile.yml' ++ // Note: The `copy_source` attribute applies to S3 copy operations and ++ // corresponds to the `--copy-source` parameter ++ // of the [copy-object operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") ++ ++ // AWSS3UploadIDKey is the attribute Key conforming to the ++ // "aws.s3.upload_id" semantic conventions. It represents the upload ID ++ // that identifies the multipart upload. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' ++ // Note: The `upload_id` attribute applies to S3 multipart-upload ++ // operations and corresponds to the `--upload-id` parameter ++ // of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // multipart operations. ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) ++ // - ++ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) ++ // - ++ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) ++ // - ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") ++ ++ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" ++ // semantic conventions. It represents the delete request container that ++ // specifies the objects to be deleted. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' ++ // Note: The `delete` attribute is only applicable to the ++ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) ++ // operation. ++ // The `delete` attribute corresponds to the `--delete` parameter of the ++ // [delete-objects operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). ++ AWSS3DeleteKey = attribute.Key("aws.s3.delete") ++ ++ // AWSS3PartNumberKey is the attribute Key conforming to the ++ // "aws.s3.part_number" semantic conventions. It represents the part number ++ // of the part being uploaded in a multipart-upload operation. This is a ++ // positive integer between 1 and 10,000. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3456 ++ // Note: The `part_number` attribute is only applicable to the ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // and ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ // operations. 
++ // The `part_number` attribute corresponds to the `--part-number` parameter ++ // of the ++ // [upload-part operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). ++ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") ++) ++ ++// AWSS3Bucket returns an attribute KeyValue conforming to the ++// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the ++// request refers to. Corresponds to the `--bucket` parameter of the [S3 ++// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++// operations. ++func AWSS3Bucket(val string) attribute.KeyValue { ++ return AWSS3BucketKey.String(val) ++} ++ ++// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" ++// semantic conventions. It represents the S3 object key the request refers to. ++// Corresponds to the `--key` parameter of the [S3 ++// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++// operations. ++func AWSS3Key(val string) attribute.KeyValue { ++ return AWSS3KeyKey.String(val) ++} ++ ++// AWSS3CopySource returns an attribute KeyValue conforming to the ++// "aws.s3.copy_source" semantic conventions. It represents the source object ++// (in the form `bucket`/`key`) for the copy operation. ++func AWSS3CopySource(val string) attribute.KeyValue { ++ return AWSS3CopySourceKey.String(val) ++} ++ ++// AWSS3UploadID returns an attribute KeyValue conforming to the ++// "aws.s3.upload_id" semantic conventions. It represents the upload ID that ++// identifies the multipart upload. ++func AWSS3UploadID(val string) attribute.KeyValue { ++ return AWSS3UploadIDKey.String(val) ++} ++ ++// AWSS3Delete returns an attribute KeyValue conforming to the ++// "aws.s3.delete" semantic conventions. It represents the delete request ++// container that specifies the objects to be deleted. ++func AWSS3Delete(val string) attribute.KeyValue { ++ return AWSS3DeleteKey.String(val) ++} ++ ++// AWSS3PartNumber returns an attribute KeyValue conforming to the ++// "aws.s3.part_number" semantic conventions. It represents the part number of ++// the part being uploaded in a multipart-upload operation. This is a positive ++// integer between 1 and 10,000. ++func AWSS3PartNumber(val int) attribute.KeyValue { ++ return AWSS3PartNumberKey.Int(val) ++} ++ ++// Semantic conventions to apply when instrumenting the GraphQL implementation. ++// They map GraphQL operations to attributes on a Span. ++const ( ++ // GraphqlOperationNameKey is the attribute Key conforming to the ++ // "graphql.operation.name" semantic conventions. It represents the name of ++ // the operation being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'findBookByID' ++ GraphqlOperationNameKey = attribute.Key("graphql.operation.name") ++ ++ // GraphqlOperationTypeKey is the attribute Key conforming to the ++ // "graphql.operation.type" semantic conventions. It represents the type of ++ // the operation being executed. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query', 'mutation', 'subscription' ++ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") ++ ++ // GraphqlDocumentKey is the attribute Key conforming to the ++ // "graphql.document" semantic conventions. It represents the GraphQL ++ // document being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query findBookByID { bookByID(id: ?) 
{ name } }' ++ // Note: The value may be sanitized to exclude sensitive information. ++ GraphqlDocumentKey = attribute.Key("graphql.document") ++) ++ ++var ( ++ // GraphQL query ++ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") ++ // GraphQL mutation ++ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") ++ // GraphQL subscription ++ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ++) ++ ++// GraphqlOperationName returns an attribute KeyValue conforming to the ++// "graphql.operation.name" semantic conventions. It represents the name of the ++// operation being executed. ++func GraphqlOperationName(val string) attribute.KeyValue { ++ return GraphqlOperationNameKey.String(val) ++} ++ ++// GraphqlDocument returns an attribute KeyValue conforming to the ++// "graphql.document" semantic conventions. It represents the GraphQL document ++// being executed. ++func GraphqlDocument(val string) attribute.KeyValue { ++ return GraphqlDocumentKey.String(val) ++} ++ ++// General attributes used in messaging systems. ++const ( ++ // MessagingSystemKey is the attribute Key conforming to the ++ // "messaging.system" semantic conventions. It represents a string ++ // identifying the messaging system. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' ++ MessagingSystemKey = attribute.Key("messaging.system") ++ ++ // MessagingOperationKey is the attribute Key conforming to the ++ // "messaging.operation" semantic conventions. It represents a string ++ // identifying the kind of messaging operation as defined in the [Operation ++ // names](#operation-names) section above. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: If a custom value is used, it MUST be of low cardinality. ++ MessagingOperationKey = attribute.Key("messaging.operation") ++ ++ // MessagingBatchMessageCountKey is the attribute Key conforming to the ++ // "messaging.batch.message_count" semantic conventions. It represents the ++ // number of messages sent, received, or processed in the scope of the ++ // batching operation. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the span describes an ++ // operation on a batch of messages.) ++ // Stability: stable ++ // Examples: 0, 1, 2 ++ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on ++ // spans that operate with a single message. When a messaging client ++ // library supports both batch and single-message API for the same ++ // operation, instrumentations SHOULD use `messaging.batch.message_count` ++ // for batching APIs and SHOULD NOT use it for single-message APIs. ++ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ++ ++ // MessagingClientIDKey is the attribute Key conforming to the ++ // "messaging.client_id" semantic conventions. It represents a unique ++ // identifier for the client that consumes or produces a message. 
++ // ++ // Type: string ++ // RequirementLevel: Recommended (If a client id is available) ++ // Stability: stable ++ // Examples: 'client-5', 'myhost@8742@s8083jm' ++ MessagingClientIDKey = attribute.Key("messaging.client_id") ++) ++ ++var ( ++ // publish ++ MessagingOperationPublish = MessagingOperationKey.String("publish") ++ // receive ++ MessagingOperationReceive = MessagingOperationKey.String("receive") ++ // process ++ MessagingOperationProcess = MessagingOperationKey.String("process") ++) ++ ++// MessagingSystem returns an attribute KeyValue conforming to the ++// "messaging.system" semantic conventions. It represents a string identifying ++// the messaging system. ++func MessagingSystem(val string) attribute.KeyValue { ++ return MessagingSystemKey.String(val) ++} ++ ++// MessagingBatchMessageCount returns an attribute KeyValue conforming to ++// the "messaging.batch.message_count" semantic conventions. It represents the ++// number of messages sent, received, or processed in the scope of the batching ++// operation. ++func MessagingBatchMessageCount(val int) attribute.KeyValue { ++ return MessagingBatchMessageCountKey.Int(val) ++} ++ ++// MessagingClientID returns an attribute KeyValue conforming to the ++// "messaging.client_id" semantic conventions. It represents a unique ++// identifier for the client that consumes or produces a message. ++func MessagingClientID(val string) attribute.KeyValue { ++ return MessagingClientIDKey.String(val) ++} ++ ++// Semantic conventions for remote procedure calls. ++const ( ++ // RPCSystemKey is the attribute Key conforming to the "rpc.system" ++ // semantic conventions. It represents a string identifying the remoting ++ // system. See below for a list of well-known identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCSystemKey = attribute.Key("rpc.system") ++ ++ // RPCServiceKey is the attribute Key conforming to the "rpc.service" ++ // semantic conventions. It represents the full (logical) name of the ++ // service being called, including its package name, if applicable. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'myservice.EchoService' ++ // Note: This is the logical name of the service from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // class. The `code.namespace` attribute may be used to store the latter ++ // (despite the attribute name, it may include a class name; e.g., class ++ // with method actually executing the call on the server side, RPC client ++ // stub class on the client side). ++ RPCServiceKey = attribute.Key("rpc.service") ++ ++ // RPCMethodKey is the attribute Key conforming to the "rpc.method" ++ // semantic conventions. It represents the name of the (logical) method ++ // being called, must be equal to the $method part in the span name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'exampleMethod' ++ // Note: This is the logical name of the method from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // method/function. The `code.function` attribute may be used to store the ++ // latter (e.g., method actually executing the call on the server side, RPC ++ // client stub method on the client side). 
++ RPCMethodKey = attribute.Key("rpc.method") ++) ++ ++var ( ++ // gRPC ++ RPCSystemGRPC = RPCSystemKey.String("grpc") ++ // Java RMI ++ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") ++ // .NET WCF ++ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") ++ // Apache Dubbo ++ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ++ // Connect RPC ++ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") ++) ++ ++// RPCService returns an attribute KeyValue conforming to the "rpc.service" ++// semantic conventions. It represents the full (logical) name of the service ++// being called, including its package name, if applicable. ++func RPCService(val string) attribute.KeyValue { ++ return RPCServiceKey.String(val) ++} ++ ++// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" ++// semantic conventions. It represents the name of the (logical) method being ++// called, must be equal to the $method part in the span name. ++func RPCMethod(val string) attribute.KeyValue { ++ return RPCMethodKey.String(val) ++} ++ ++// Tech-specific attributes for gRPC. ++const ( ++ // RPCGRPCStatusCodeKey is the attribute Key conforming to the ++ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric ++ // status ++ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of ++ // the gRPC request. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++var ( ++ // OK ++ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) ++ // CANCELLED ++ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) ++ // UNKNOWN ++ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) ++ // INVALID_ARGUMENT ++ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) ++ // DEADLINE_EXCEEDED ++ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) ++ // NOT_FOUND ++ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) ++ // ALREADY_EXISTS ++ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) ++ // PERMISSION_DENIED ++ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) ++ // RESOURCE_EXHAUSTED ++ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) ++ // FAILED_PRECONDITION ++ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) ++ // ABORTED ++ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) ++ // OUT_OF_RANGE ++ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) ++ // UNIMPLEMENTED ++ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) ++ // INTERNAL ++ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) ++ // UNAVAILABLE ++ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) ++ // DATA_LOSS ++ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) ++ // UNAUTHENTICATED ++ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ++) ++ ++// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). ++const ( ++ // RPCJsonrpcVersionKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++ // does not specify this, the value can be omitted. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // version (`1.0`)) ++ // Stability: stable ++ // Examples: '2.0', '1.0' ++ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") ++ ++ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++ // property of request or response. Since protocol allows id to be int, ++ // string, `null` or missing (for notifications), value is expected to be ++ // cast to string for simplicity. Use empty string in case of `null` value. ++ // Omit entirely if this is a notification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10', 'request-7', '' ++ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") ++ ++ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_code" semantic conventions. It represents the ++ // `error.code` property of response if it is an error response. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If response is not successful.) ++ // Stability: stable ++ // Examples: -32700, 100 ++ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") ++ ++ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_message" semantic conventions. It represents the ++ // `error.message` property of response if it is an error response. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Parse error', 'User already exists' ++ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ++) ++ ++// RPCJsonrpcVersion returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++// does not specify this, the value can be omitted. ++func RPCJsonrpcVersion(val string) attribute.KeyValue { ++ return RPCJsonrpcVersionKey.String(val) ++} ++ ++// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++// property of request or response. Since protocol allows id to be int, string, ++// `null` or missing (for notifications), value is expected to be cast to ++// string for simplicity. Use empty string in case of `null` value. Omit ++// entirely if this is a notification. ++func RPCJsonrpcRequestID(val string) attribute.KeyValue { ++ return RPCJsonrpcRequestIDKey.String(val) ++} ++ ++// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_code" semantic conventions. It represents the ++// `error.code` property of response if it is an error response. ++func RPCJsonrpcErrorCode(val int) attribute.KeyValue { ++ return RPCJsonrpcErrorCodeKey.Int(val) ++} ++ ++// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_message" semantic conventions. It represents the ++// `error.message` property of response if it is an error response. ++func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { ++ return RPCJsonrpcErrorMessageKey.String(val) ++} ++ ++// Tech-specific attributes for Connect RPC. ++const ( ++ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the ++ // "rpc.connect_rpc.error_code" semantic conventions. 
It represents the ++ // [error codes](https://connect.build/docs/protocol/#error-codes) of the ++ // Connect request. Error codes are always string values. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (If response is not successful ++ // and if error code available.) ++ // Stability: stable ++ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") ++) ++ ++var ( ++ // cancelled ++ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") ++ // unknown ++ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") ++ // invalid_argument ++ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") ++ // deadline_exceeded ++ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") ++ // not_found ++ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") ++ // already_exists ++ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") ++ // permission_denied ++ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") ++ // resource_exhausted ++ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") ++ // failed_precondition ++ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") ++ // aborted ++ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") ++ // out_of_range ++ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") ++ // unimplemented ++ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") ++ // internal ++ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") ++ // unavailable ++ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") ++ // data_loss ++ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") ++ // unauthenticated ++ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") ++) +diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go +index f058cc781e0..3aadc66cf7a 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/config.go ++++ b/vendor/go.opentelemetry.io/otel/trace/config.go +@@ -25,6 +25,7 @@ type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string ++ attrs attribute.Set + } + + // InstrumentationVersion returns the version of the library providing instrumentation. +@@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion + } + ++// InstrumentationAttributes returns the attributes associated with the library ++// providing instrumentation. ++func (t *TracerConfig) InstrumentationAttributes() attribute.Set { ++ return t.attrs ++} ++ + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. 
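
The RPC and messaging helpers above are thin constructors over attribute.KeyValue. A minimal sketch of how instrumentation code might attach them to a client span, also exercising the WithInstrumentationAttributes option added in the trace/config.go hunk below; the semconv import path and every name in the sketch are assumptions for illustration, not part of this patch:

	package example

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/attribute"
		semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version; use whichever semconv package this vendor tree provides
		"go.opentelemetry.io/otel/trace"
	)

	func echoClientSpan(ctx context.Context) {
		// WithInstrumentationAttributes is the new TracerOption introduced in
		// the trace/config.go change that follows this hunk.
		tracer := otel.Tracer("example/instrumentation",
			trace.WithInstrumentationVersion("0.1.0"),
			trace.WithInstrumentationAttributes(attribute.String("example.team", "observability")),
		)

		_, span := tracer.Start(ctx, "myservice.EchoService/Echo",
			trace.WithSpanKind(trace.SpanKindClient),
			trace.WithAttributes(
				semconv.RPCSystemGRPC,
				semconv.RPCService("myservice.EchoService"),
				semconv.RPCMethod("Echo"),
			),
		)
		defer span.End()
	}

The option is evaluated once per Tracer, so the scope attributes are de-duplicated up front rather than on every span.
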
+ func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +@@ -261,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c + } ++ + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +@@ -307,6 +315,16 @@ func WithInstrumentationVersion(version string) TracerOption { + }) + } + ++// WithInstrumentationAttributes sets the instrumentation attributes. ++// ++// The passed attributes will be de-duplicated. ++func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { ++ return tracerOptionFunc(func(config TracerConfig) TracerConfig { ++ config.attrs = attribute.NewSet(attr...) ++ return config ++ }) ++} ++ + // WithSchemaURL sets the schema URL for the Tracer. + func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { +diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go +index 391417718f5..440f3d7565a 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/doc.go ++++ b/vendor/go.opentelemetry.io/otel/trace/doc.go +@@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the + OpenTelemetry API. + + To participate in distributed traces a Span needs to be created for the +-operation being performed as part of a traced workflow. It its simplest form: ++operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + +@@ -62,5 +62,69 @@ a default. + defer span.End() + // ... + } ++ ++# API Implementations ++ ++This package does not conform to the standard Go versioning policy; all of its ++interfaces may have methods added to them without a package major version bump. ++This non-standard API evolution could surprise an uninformed implementation ++author. They could unknowingly build their implementation in a way that would ++result in a runtime panic for their users that update to the new API. ++ ++The API is designed to help inform an instrumentation author about this ++non-standard API evolution. It requires them to choose a default behavior for ++unimplemented interface methods. There are three behavior choices they can ++make: ++ ++ - Compilation failure ++ - Panic ++ - Default to another implementation ++ ++All interfaces in this API embed a corresponding interface from ++[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default ++behavior of their implementations to be a compilation failure, signaling to ++their users they need to update to the latest version of that implementation, ++they need to embed the corresponding interface from ++[go.opentelemetry.io/otel/trace/embedded] in their implementation. For ++example, ++ ++ import "go.opentelemetry.io/otel/trace/embedded" ++ ++ type TracerProvider struct { ++ embedded.TracerProvider ++ // ... ++ } ++ ++If an author wants the default behavior of their implementations to panic, they ++can embed the API interface directly. ++ ++ import "go.opentelemetry.io/otel/trace" ++ ++ type TracerProvider struct { ++ trace.TracerProvider ++ // ... ++ } ++ ++This option is not recommended. It will lead to publishing packages that ++contain runtime panics when users update to newer versions of ++[go.opentelemetry.io/otel/trace], which may be done with a trasitive ++dependency. ++ ++Finally, an author can embed another implementation in theirs. The embedded ++implementation will be used for methods not defined by the author. 
For example, ++an author who wants to default to silently dropping the call can use ++[go.opentelemetry.io/otel/trace/noop]: ++ ++ import "go.opentelemetry.io/otel/trace/noop" ++ ++ type TracerProvider struct { ++ noop.TracerProvider ++ // ... ++ } ++ ++It is strongly recommended that authors only embed ++[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. ++That implementation is the only one OpenTelemetry authors can guarantee will ++fully implement all the API interfaces when a user updates their API. + */ + package trace // import "go.opentelemetry.io/otel/trace" +diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +new file mode 100644 +index 00000000000..898db5a7546 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +@@ -0,0 +1,56 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package embedded provides interfaces embedded within the [OpenTelemetry ++// trace API]. ++// ++// Implementers of the [OpenTelemetry trace API] can embed the relevant type ++// from this package into their implementation directly. Doing so will result ++// in a compilation error for users when the [OpenTelemetry trace API] is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++// ++// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace ++package embedded // import "go.opentelemetry.io/otel/trace/embedded" ++ ++// TracerProvider is embedded in ++// [go.opentelemetry.io/otel/trace.TracerProvider]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type TracerProvider interface{ tracerProvider() } ++ ++// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Tracer interface{ tracer() } ++ ++// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Span interface{ span() } +diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go +index 73950f20778..c125491caeb 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/noop.go ++++ b/vendor/go.opentelemetry.io/otel/trace/noop.go +@@ -19,16 +19,20 @@ import ( + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // NewNoopTracerProvider returns an implementation of TracerProvider that + // performs no operations. The Tracer and Spans created from the returned + // TracerProvider also perform no operations. ++// ++// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] ++// instead. + func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} + } + +-type noopTracerProvider struct{} ++type noopTracerProvider struct{ embedded.TracerProvider } + + var _ TracerProvider = noopTracerProvider{} + +@@ -37,8 +41,8 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} + } + +-// noopTracer is an implementation of Tracer that preforms no operations. +-type noopTracer struct{} ++// noopTracer is an implementation of Tracer that performs no operations. ++type noopTracer struct{ embedded.Tracer } + + var _ Tracer = noopTracer{} + +@@ -53,8 +57,8 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption + return ContextWithSpan(ctx, span), span + } + +-// noopSpan is an implementation of Span that preforms no operations. +-type noopSpan struct{} ++// noopSpan is an implementation of Span that performs no operations. ++type noopSpan struct{ embedded.Span } + + var _ Span = noopSpan{} + +diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +new file mode 100644 +index 00000000000..7f485543c47 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +@@ -0,0 +1,118 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package noop provides an implementation of the OpenTelemetry trace API that ++// produces no telemetry and minimizes used computation resources. ++// ++// Using this package to implement the OpenTelemetry trace API will effectively ++// disable OpenTelemetry. ++// ++// This implementation can be embedded in other implementations of the ++// OpenTelemetry trace API. Doing so will mean the implementation defaults to ++// no operation for methods it does not implement. 
++package noop // import "go.opentelemetry.io/otel/trace/noop" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" ++) ++ ++var ( ++ // Compile-time check this implements the OpenTelemetry API. ++ ++ _ trace.TracerProvider = TracerProvider{} ++ _ trace.Tracer = Tracer{} ++ _ trace.Span = Span{} ++) ++ ++// TracerProvider is an OpenTelemetry No-Op TracerProvider. ++type TracerProvider struct{ embedded.TracerProvider } ++ ++// NewTracerProvider returns a TracerProvider that does not record any telemetry. ++func NewTracerProvider() TracerProvider { ++ return TracerProvider{} ++} ++ ++// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. ++func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { ++ return Tracer{} ++} ++ ++// Tracer is an OpenTelemetry No-Op Tracer. ++type Tracer struct{ embedded.Tracer } ++ ++// Start creates a span. The created span will be set in a child context of ctx ++// and returned with the span. ++// ++// If ctx contains a span context, the returned span will also contain that ++// span context. If the span context in ctx is for a non-recording span, that ++// span instance will be returned directly. ++func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { ++ span := trace.SpanFromContext(ctx) ++ ++ // If the parent context contains a non-zero span context, that span ++ // context needs to be returned as a non-recording span ++ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). ++ var zeroSC trace.SpanContext ++ if sc := span.SpanContext(); !sc.Equal(zeroSC) { ++ if !span.IsRecording() { ++ // If the span is not recording return it directly. ++ return ctx, span ++ } ++ // Otherwise, return the span context needs in a non-recording span. ++ span = Span{sc: sc} ++ } else { ++ // No parent, return a No-Op span with an empty span context. ++ span = Span{} ++ } ++ return trace.ContextWithSpan(ctx, span), span ++} ++ ++// Span is an OpenTelemetry No-Op Span. ++type Span struct { ++ embedded.Span ++ ++ sc trace.SpanContext ++} ++ ++// SpanContext returns an empty span context. ++func (s Span) SpanContext() trace.SpanContext { return s.sc } ++ ++// IsRecording always returns false. ++func (Span) IsRecording() bool { return false } ++ ++// SetStatus does nothing. ++func (Span) SetStatus(codes.Code, string) {} ++ ++// SetAttributes does nothing. ++func (Span) SetAttributes(...attribute.KeyValue) {} ++ ++// End does nothing. ++func (Span) End(...trace.SpanEndOption) {} ++ ++// RecordError does nothing. ++func (Span) RecordError(error, ...trace.EventOption) {} ++ ++// AddEvent does nothing. ++func (Span) AddEvent(string, ...trace.EventOption) {} ++ ++// SetName does nothing. ++func (Span) SetName(string) {} ++ ++// TracerProvider returns a No-Op TracerProvider. 
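
A minimal sketch of how a consumer might install the no-op provider vendored above to disable tracing globally; otel.SetTracerProvider comes from the root otel package rather than from this hunk:

	package example

	import (
		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/trace/noop"
	)

	func disableTracing() {
		// Every tracer obtained from the global provider after this call
		// hands out non-recording spans and emits no telemetry.
		otel.SetTracerProvider(noop.NewTracerProvider())
	}
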
++func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } +diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go +index 97f3d83855b..26a4b2260ec 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/trace.go ++++ b/vendor/go.opentelemetry.io/otel/trace/trace.go +@@ -22,6 +22,7 @@ import ( + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + const ( +@@ -48,8 +49,10 @@ func (e errorConst) Error() string { + // nolint:revive // revive complains about stutter of `trace.TraceID`. + type TraceID [16]byte + +-var nilTraceID TraceID +-var _ json.Marshaler = nilTraceID ++var ( ++ nilTraceID TraceID ++ _ json.Marshaler = nilTraceID ++) + + // IsValid checks whether the trace TraceID is valid. A valid trace ID does + // not consist of zeros only. +@@ -71,8 +74,10 @@ func (t TraceID) String() string { + // SpanID is a unique identity of a span in a trace. + type SpanID [8]byte + +-var nilSpanID SpanID +-var _ json.Marshaler = nilSpanID ++var ( ++ nilSpanID SpanID ++ _ json.Marshaler = nilSpanID ++) + + // IsValid checks whether the SpanID is valid. A valid SpanID does not consist + // of zeros only. +@@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { + // create a Span and it is then up to the operation the Span represents to + // properly end the Span when the operation itself ends. + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Span interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Span ++ + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this +@@ -364,8 +376,9 @@ type Span interface { + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a +- // description, overriding previous values set. The description is only +- // included in a status when the code is for an error. ++ // description, provided the status hasn't already been set to a higher ++ // value before (OK > Error > Unset). The description is only included in a ++ // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. +@@ -485,8 +498,15 @@ func (sk SpanKind) String() string { + + // Tracer is the creator of Spans. + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Tracer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Tracer ++ + // Start creates a span and a context.Context containing the newly-created span. 
+ // + // If the context.Context provided in `ctx` contains a Span then the newly-created +@@ -517,8 +537,15 @@ type Tracer interface { + // at runtime from its users or it can simply use the globally registered one + // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type TracerProvider interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.TracerProvider ++ + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. +diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go +index ca68a82e5f7..d1e47ca2faa 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go ++++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go +@@ -28,9 +28,9 @@ const ( + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header +- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` +- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` +- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` ++ noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` ++ withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` ++ valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" +@@ -40,9 +40,10 @@ const ( + ) + + var ( +- keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) +- valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) +- memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ++ noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) ++ withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) ++ valueRe = regexp.MustCompile(`^` + valueFormat + `$`) ++ memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + ) + + type member struct { +@@ -51,10 +52,19 @@ type member struct { + } + + func newMember(key, value string) (member, error) { +- if !keyRe.MatchString(key) { ++ if len(key) > 256 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } +- if !valueRe.MatchString(value) { ++ if !noTenantKeyRe.MatchString(key) { ++ if !withTenantKeyRe.MatchString(key) { ++ return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) ++ } ++ atIndex := strings.LastIndex(key, "@") ++ if atIndex > 241 || len(key)-1-atIndex > 14 { ++ return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) ++ } ++ } ++ if len(value) > 256 || !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +@@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { + + func parseMember(m 
string) (member, error) { + matches := memberRe.FindStringSubmatch(m) +- if len(matches) != 5 { ++ if len(matches) != 3 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } +- +- return member{ +- Key: matches[1], +- Value: matches[4], +- }, nil ++ result, e := newMember(matches[1], matches[2]) ++ if e != nil { ++ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) ++ } ++ return result, nil + } + + // String encodes member into a string compliant with the W3C Trace Context +diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go +index 806db41c555..5a92f1d4b6c 100644 +--- a/vendor/go.opentelemetry.io/otel/version.go ++++ b/vendor/go.opentelemetry.io/otel/version.go +@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" + + // Version is the current release version of OpenTelemetry in use. + func Version() string { +- return "1.10.0" ++ return "1.20.0" + } +diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml +index ec2ca16d270..82366e79981 100644 +--- a/vendor/go.opentelemetry.io/otel/versions.yaml ++++ b/vendor/go.opentelemetry.io/otel/versions.yaml +@@ -14,45 +14,42 @@ + + module-sets: + stable-v1: +- version: v1.10.0 ++ version: v1.20.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing ++ - go.opentelemetry.io/otel/bridge/opentracing/test ++ - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/fib +- - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin +- - go.opentelemetry.io/otel/exporters/jaeger +- - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp +- - go.opentelemetry.io/otel/exporters/otlp/internal/retry + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace +- - go.opentelemetry.io/otel/trace ++ - go.opentelemetry.io/otel/exporters/zipkin ++ - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk ++ - go.opentelemetry.io/otel/sdk/metric ++ - go.opentelemetry.io/otel/trace + experimental-metrics: +- version: v0.31.0 ++ version: v0.43.0 + modules: ++ - go.opentelemetry.io/otel/bridge/opencensus ++ - go.opentelemetry.io/otel/bridge/opencensus/test ++ - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus ++ - go.opentelemetry.io/otel/example/view + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric +- - go.opentelemetry.io/otel/metric +- - go.opentelemetry.io/otel/sdk/metric + experimental-schema: +- version: v0.0.3 ++ version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +- bridge: +- version: v0.31.0 +- modules: +- - go.opentelemetry.io/otel/bridge/opencensus +- - go.opentelemetry.io/otel/bridge/opencensus/test +- - go.opentelemetry.io/otel/example/opencensus + excluded-modules: + - go.opentelemetry.io/otel/internal/tools +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go 
b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +index fc285c089e7..c1af04e84e5 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/collector/trace/v1/trace_service.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +index d142c2a447d..bb1bd261ed8 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +@@ -77,20 +77,22 @@ func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) +- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) ++ var err error ++ var annotatedContext context.Context ++ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } +- resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) ++ resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) +- ctx = runtime.NewServerMetadataContext(ctx, md) ++ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { +- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) ++ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + +- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) ++ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + +@@ -139,19 +141,21 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) +- rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) ++ var err error ++ var annotatedContext context.Context ++ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } +- resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) +- ctx = runtime.NewServerMetadataContext(ctx, md) ++ resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) ++ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { +- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) ++ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + +- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) ++ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + +@@ -159,7 +163,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu + } + + var ( +- pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) ++ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) + ) + + var ( +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +index c21f2cb47cf..dd1b73f1e99 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +@@ -1,7 +1,7 @@ + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. + // versions: + // - protoc-gen-go-grpc v1.1.0 +-// - protoc v3.17.3 ++// - protoc v3.21.6 + // source: opentelemetry/proto/collector/trace/v1/trace_service.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +index 8502e607b25..852209b097b 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/common/v1/common.proto + + package v1 +@@ -361,8 +361,11 @@ type InstrumentationScope struct { + unknownFields protoimpl.UnknownFields + + // An empty instrumentation scope name means the name is unknown. 
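
The gateway registration above now routes OTLP/HTTP trace export at /v1/traces instead of /v1/trace. On the client side the matching default is used by the otlptracehttp exporter; a minimal sketch (the endpoint value is illustrative, not part of this patch):

	package example

	import (
		"context"

		"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	)

	func newTraceExporter(ctx context.Context) error {
		_, err := otlptracehttp.New(ctx,
			otlptracehttp.WithEndpoint("collector.example:4318"), // illustrative endpoint
			otlptracehttp.WithURLPath("/v1/traces"),              // matches the gateway route above; also the exporter default
			otlptracehttp.WithInsecure(),
		)
		return err
	}
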
+- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` ++ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` ++ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` ++ // Additional attributes that describe the scope. [Optional]. ++ // Attribute keys MUST be unique (it is not allowed to have more than one ++ // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + } +diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +index bcc1060e3dd..b7545b03b9f 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/resource/v1/resource.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +index 499a43d77bb..51a499816a6 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/trace/v1/trace.proto + + package v1 +@@ -117,8 +117,8 @@ type Status_StatusCode int32 + const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 +- // The Span has been validated by an Application developers or Operator to have +- // completed successfully. ++ // The Span has been validated by an Application developer or Operator to ++ // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +@@ -374,20 +374,16 @@ type Span struct { + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share +- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes +- // is considered invalid. +- // +- // This field is semantically required. Receiver should generate new +- // random trace_id if empty or invalid trace_id was received. ++ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR ++ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON ++ // is zero-length and thus is also invalid). + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span +- // is created. The ID is an 8-byte array. An ID with all zeroes is considered +- // invalid. +- // +- // This field is semantically required. Receiver should generate new +- // random span_id if empty or invalid span_id was received. ++ // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length ++ // other than 8 bytes is considered invalid (empty string in OTLP/JSON ++ // is zero-length and thus is also invalid). + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` +@@ -433,8 +429,8 @@ type Span struct { + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 +- // "abc.com/myattribute": true +- // "abc.com/score": 10.239 ++ // "example.com/myattribute": true ++ // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +new file mode 100644 +index 00000000000..93da7322bc4 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +@@ -0,0 +1,98 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its ++// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and ++// draft-irtf-cfrg-xchacha-01. ++package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" ++ ++import ( ++ "crypto/cipher" ++ "errors" ++) ++ ++const ( ++ // KeySize is the size of the key used by this AEAD, in bytes. ++ KeySize = 32 ++ ++ // NonceSize is the size of the nonce used with the standard variant of this ++ // AEAD, in bytes. ++ // ++ // Note that this is too short to be safely generated at random if the same ++ // key is reused more than 2³² times. ++ NonceSize = 12 ++ ++ // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 ++ // variant of this AEAD, in bytes. ++ NonceSizeX = 24 ++ ++ // Overhead is the size of the Poly1305 authentication tag, and the ++ // difference between a ciphertext length and its plaintext. ++ Overhead = 16 ++) ++ ++type chacha20poly1305 struct { ++ key [KeySize]byte ++} ++ ++// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
++func New(key []byte) (cipher.AEAD, error) { ++ if len(key) != KeySize { ++ return nil, errors.New("chacha20poly1305: bad key length") ++ } ++ ret := new(chacha20poly1305) ++ copy(ret.key[:], key) ++ return ret, nil ++} ++ ++func (c *chacha20poly1305) NonceSize() int { ++ return NonceSize ++} ++ ++func (c *chacha20poly1305) Overhead() int { ++ return Overhead ++} ++ ++func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if len(nonce) != NonceSize { ++ panic("chacha20poly1305: bad nonce length passed to Seal") ++ } ++ ++ if uint64(len(plaintext)) > (1<<38)-64 { ++ panic("chacha20poly1305: plaintext too large") ++ } ++ ++ return c.seal(dst, nonce, plaintext, additionalData) ++} ++ ++var errOpen = errors.New("chacha20poly1305: message authentication failed") ++ ++func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ panic("chacha20poly1305: bad nonce length passed to Open") ++ } ++ if len(ciphertext) < 16 { ++ return nil, errOpen ++ } ++ if uint64(len(ciphertext)) > (1<<38)-48 { ++ panic("chacha20poly1305: ciphertext too large") ++ } ++ ++ return c.open(dst, nonce, ciphertext, additionalData) ++} ++ ++// sliceForAppend takes a slice and a requested number of bytes. It returns a ++// slice with the contents of the given slice followed by that many bytes and a ++// second slice that aliases into it and contains only the extra bytes. If the ++// original slice has sufficient capacity then no allocation is performed. ++func sliceForAppend(in []byte, n int) (head, tail []byte) { ++ if total := len(in) + n; cap(in) >= total { ++ head = in[:total] ++ } else { ++ head = make([]byte, total) ++ copy(head, in) ++ } ++ tail = head[len(in):] ++ return ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +new file mode 100644 +index 00000000000..50695a14f62 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +@@ -0,0 +1,86 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build gc && !purego ++ ++package chacha20poly1305 ++ ++import ( ++ "encoding/binary" ++ ++ "golang.org/x/crypto/internal/alias" ++ "golang.org/x/sys/cpu" ++) ++ ++//go:noescape ++func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool ++ ++//go:noescape ++func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) ++ ++var ( ++ useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 ++) ++ ++// setupState writes a ChaCha20 input matrix to state. See ++// https://tools.ietf.org/html/rfc7539#section-2.3. 
++func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { ++ state[0] = 0x61707865 ++ state[1] = 0x3320646e ++ state[2] = 0x79622d32 ++ state[3] = 0x6b206574 ++ ++ state[4] = binary.LittleEndian.Uint32(key[0:4]) ++ state[5] = binary.LittleEndian.Uint32(key[4:8]) ++ state[6] = binary.LittleEndian.Uint32(key[8:12]) ++ state[7] = binary.LittleEndian.Uint32(key[12:16]) ++ state[8] = binary.LittleEndian.Uint32(key[16:20]) ++ state[9] = binary.LittleEndian.Uint32(key[20:24]) ++ state[10] = binary.LittleEndian.Uint32(key[24:28]) ++ state[11] = binary.LittleEndian.Uint32(key[28:32]) ++ ++ state[12] = 0 ++ state[13] = binary.LittleEndian.Uint32(nonce[0:4]) ++ state[14] = binary.LittleEndian.Uint32(nonce[4:8]) ++ state[15] = binary.LittleEndian.Uint32(nonce[8:12]) ++} ++ ++func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if !cpu.X86.HasSSSE3 { ++ return c.sealGeneric(dst, nonce, plaintext, additionalData) ++ } ++ ++ var state [16]uint32 ++ setupState(&state, &c.key, nonce) ++ ++ ret, out := sliceForAppend(dst, len(plaintext)+16) ++ if alias.InexactOverlap(out, plaintext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) ++ return ret ++} ++ ++func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if !cpu.X86.HasSSSE3 { ++ return c.openGeneric(dst, nonce, ciphertext, additionalData) ++ } ++ ++ var state [16]uint32 ++ setupState(&state, &c.key, nonce) ++ ++ ciphertext = ciphertext[:len(ciphertext)-16] ++ ret, out := sliceForAppend(dst, len(ciphertext)) ++ if alias.InexactOverlap(out, ciphertext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { ++ for i := range out { ++ out[i] = 0 ++ } ++ return nil, errOpen ++ } ++ ++ return ret, nil ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +new file mode 100644 +index 00000000000..731d2ac6dbc +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +@@ -0,0 +1,2715 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
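
The vendored package implements the standard cipher.AEAD interface, with the assembly below selected only on amd64 builds that are not purego. A minimal encrypt/decrypt round trip with the 12-byte standard nonce:

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/chacha20poly1305"
	)

	func main() {
		key := make([]byte, chacha20poly1305.KeySize)
		if _, err := rand.Read(key); err != nil {
			panic(err)
		}
		aead, err := chacha20poly1305.New(key)
		if err != nil {
			panic(err)
		}

		// NonceSize is 12 bytes for the standard variant; a random nonce is
		// only safe while the key is not reused more than 2^32 times.
		nonce := make([]byte, aead.NonceSize())
		if _, err := rand.Read(nonce); err != nil {
			panic(err)
		}

		ciphertext := aead.Seal(nil, nonce, []byte("attack at dawn"), nil)
		plaintext, err := aead.Open(nil, nonce, ciphertext, nil)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", plaintext)
	}
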
++ ++//go:build gc && !purego ++ ++#include "textflag.h" ++// General register allocation ++#define oup DI ++#define inp SI ++#define inl BX ++#define adp CX // free to reuse, after we hash the additional data ++#define keyp R8 // free to reuse, when we copy the key to stack ++#define itr2 R9 // general iterator ++#define itr1 CX // general iterator ++#define acc0 R10 ++#define acc1 R11 ++#define acc2 R12 ++#define t0 R13 ++#define t1 R14 ++#define t2 R15 ++#define t3 R8 ++// Register and stack allocation for the SSE code ++#define rStore (0*16)(BP) ++#define sStore (1*16)(BP) ++#define state1Store (2*16)(BP) ++#define state2Store (3*16)(BP) ++#define tmpStore (4*16)(BP) ++#define ctr0Store (5*16)(BP) ++#define ctr1Store (6*16)(BP) ++#define ctr2Store (7*16)(BP) ++#define ctr3Store (8*16)(BP) ++#define A0 X0 ++#define A1 X1 ++#define A2 X2 ++#define B0 X3 ++#define B1 X4 ++#define B2 X5 ++#define C0 X6 ++#define C1 X7 ++#define C2 X8 ++#define D0 X9 ++#define D1 X10 ++#define D2 X11 ++#define T0 X12 ++#define T1 X13 ++#define T2 X14 ++#define T3 X15 ++#define A3 T0 ++#define B3 T1 ++#define C3 T2 ++#define D3 T3 ++// Register and stack allocation for the AVX2 code ++#define rsStoreAVX2 (0*32)(BP) ++#define state1StoreAVX2 (1*32)(BP) ++#define state2StoreAVX2 (2*32)(BP) ++#define ctr0StoreAVX2 (3*32)(BP) ++#define ctr1StoreAVX2 (4*32)(BP) ++#define ctr2StoreAVX2 (5*32)(BP) ++#define ctr3StoreAVX2 (6*32)(BP) ++#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack ++#define AA0 Y0 ++#define AA1 Y5 ++#define AA2 Y6 ++#define AA3 Y7 ++#define BB0 Y14 ++#define BB1 Y9 ++#define BB2 Y10 ++#define BB3 Y11 ++#define CC0 Y12 ++#define CC1 Y13 ++#define CC2 Y8 ++#define CC3 Y15 ++#define DD0 Y4 ++#define DD1 Y1 ++#define DD2 Y2 ++#define DD3 Y3 ++#define TT0 DD3 ++#define TT1 AA3 ++#define TT2 BB3 ++#define TT3 CC3 ++// ChaCha20 constants ++DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 ++DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e ++DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 ++DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 ++DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 ++DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e ++DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 ++DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 ++// <<< 16 with PSHUFB ++DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 ++DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A ++DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 ++DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A ++// <<< 8 with PSHUFB ++DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 ++DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B ++DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 ++DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B ++ ++DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 ++DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 ++DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 ++DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 ++ ++DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 ++DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 ++DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 ++DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 ++// Poly1305 key clamp ++DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF ++DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC ++DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF ++DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF ++ ++DATA ·sseIncMask<>+0x00(SB)/8, $0x1 ++DATA ·sseIncMask<>+0x08(SB)/8, $0x0 ++// To load/store the last < 16 bytes in a buffer ++DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff ++DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff 
++DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff ++DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff ++DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff ++DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff ++DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff ++DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff ++DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff ++DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff ++DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff ++DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff ++DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff ++DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff ++ ++GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 ++GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 ++// No PALIGNR in Go ASM yet (but VPALIGNR is present). 
++#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 ++#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 ++#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 ++#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 ++#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 ++#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 ++#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 ++#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 ++#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 ++#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 ++#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 ++#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 ++#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 ++#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 ++#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 ++#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 ++#define shiftC0Right shiftC0Left ++#define shiftC1Right shiftC1Left ++#define shiftC2Right shiftC2Left ++#define shiftC3Right shiftC3Left ++#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 ++#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 ++#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 ++#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 ++ ++// Some macros ++ ++// ROL rotates the uint32s in register R left by N bits, using temporary T. ++#define ROL(N, R, T) \ ++ MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R ++ ++// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. ++#ifdef GOAMD64_v2 ++#define ROL16(R, T) PSHUFB ·rol16<>(SB), R ++#else ++#define ROL16(R, T) ROL(16, R, T) ++#endif ++ ++// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. 
++#ifdef GOAMD64_v2 ++#define ROL8(R, T) PSHUFB ·rol8<>(SB), R ++#else ++#define ROL8(R, T) ROL(8, R, T) ++#endif ++ ++#define chachaQR(A, B, C, D, T) \ ++ PADDD B, A; PXOR A, D; ROL16(D, T) \ ++ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ ++ PADDD B, A; PXOR A, D; ROL8(D, T) \ ++ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B ++ ++#define chachaQR_AVX2(A, B, C, D, T) \ ++ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ ++ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ ++ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ ++ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B ++ ++#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 ++#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 ++#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX ++#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 ++#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 ++ ++#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 ++#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 ++#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 ++ ++#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage ++#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage ++// ---------------------------------------------------------------------------- ++TEXT polyHashADInternal<>(SB), NOSPLIT, $0 ++ // adp points to beginning of additional data ++ // itr2 holds ad length ++ XORQ acc0, acc0 ++ XORQ acc1, acc1 ++ XORQ acc2, acc2 ++ CMPQ itr2, $13 ++ JNE hashADLoop ++ ++openFastTLSAD: ++ // Special treatment for the TLS case of 13 bytes ++ MOVQ (adp), acc0 ++ MOVQ 5(adp), acc1 ++ SHRQ $24, acc1 ++ MOVQ $1, acc2 ++ polyMul ++ RET ++ ++hashADLoop: ++ // Hash in 16 byte chunks ++ CMPQ itr2, $16 ++ JB hashADTail ++ polyAdd(0(adp)) ++ LEAQ (1*16)(adp), adp ++ SUBQ $16, itr2 ++ polyMul ++ JMP hashADLoop ++ ++hashADTail: ++ CMPQ itr2, $0 ++ JE hashADDone ++ ++ // Hash last < 16 byte tail ++ XORQ t0, t0 ++ XORQ t1, t1 ++ XORQ t2, t2 ++ ADDQ itr2, adp ++ ++hashADTailLoop: ++ SHLQ $8, t0, t1 ++ SHLQ $8, t0 ++ MOVB -1(adp), t2 ++ XORQ t2, t0 ++ DECQ adp ++ DECQ itr2 ++ JNE hashADTailLoop ++ ++hashADTailFinish: ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Finished AD ++hashADDone: ++ RET ++ ++// ---------------------------------------------------------------------------- ++// func chacha20Poly1305Open(dst, key, src, ad []byte) bool ++TEXT ·chacha20Poly1305Open(SB), 0, $288-97 ++ // For aligned stack access ++ MOVQ SP, BP ++ ADDQ $32, BP ++ ANDQ $-32, BP ++ MOVQ dst+0(FP), oup ++ MOVQ key+24(FP), keyp ++ MOVQ src+48(FP), inp ++ MOVQ src_len+56(FP), inl ++ MOVQ ad+72(FP), adp ++ ++ // Check for AVX2 support ++ CMPB ·useAVX2(SB), $1 ++ JE chacha20Poly1305Open_AVX2 ++ ++ // Special optimization, for very short buffers ++ 
CMPQ inl, $128 ++ JBE openSSE128 // About 16% faster ++ ++ // For long buffers, prepare the poly key first ++ MOVOU ·chacha20Constants<>(SB), A0 ++ MOVOU (1*16)(keyp), B0 ++ MOVOU (2*16)(keyp), C0 ++ MOVOU (3*16)(keyp), D0 ++ MOVO D0, T1 ++ ++ // Store state on stack for future use ++ MOVO B0, state1Store ++ MOVO C0, state2Store ++ MOVO D0, ctr3Store ++ MOVQ $10, itr2 ++ ++openSSEPreparePolyKey: ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ DECQ itr2 ++ JNE openSSEPreparePolyKey ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVO A0, rStore; MOVO B0, sStore ++ ++ // Hash AAD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++openSSEMainLoop: ++ CMPQ inl, $256 ++ JB openSSEMainLoopDone ++ ++ // Load state, increment counter blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ ++ // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 ++ MOVQ $4, itr1 ++ MOVQ inp, itr2 ++ ++openSSEInternalLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyAdd(0(itr2)) ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ LEAQ (2*8)(itr2), itr2 ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ polyMulStage3 ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr1 ++ JGE openSSEInternalLoop ++ ++ polyAdd(0(itr2)) ++ polyMul ++ LEAQ (2*8)(itr2), itr2 ++ ++ CMPQ itr1, $-6 ++ JG openSSEInternalLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ ++ // Load - xor - store ++ MOVO D3, tmpStore ++ MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) ++ MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) ++ MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) ++ MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) ++ MOVOU (5*16)(inp), D0; 
PXOR D0, B1; MOVOU B1, (5*16)(oup) ++ MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) ++ MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) ++ MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) ++ MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) ++ MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) ++ MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) ++ MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) ++ MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) ++ MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) ++ LEAQ 256(inp), inp ++ LEAQ 256(oup), oup ++ SUBQ $256, inl ++ JMP openSSEMainLoop ++ ++openSSEMainLoopDone: ++ // Handle the various tail sizes efficiently ++ TESTQ inl, inl ++ JE openSSEFinalize ++ CMPQ inl, $64 ++ JBE openSSETail64 ++ CMPQ inl, $128 ++ JBE openSSETail128 ++ CMPQ inl, $192 ++ JBE openSSETail192 ++ JMP openSSETail256 ++ ++openSSEFinalize: ++ // Hash in the PT, AAD lengths ++ ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Final reduce ++ MOVQ acc0, t0 ++ MOVQ acc1, t1 ++ MOVQ acc2, t2 ++ SUBQ $-5, acc0 ++ SBBQ $-1, acc1 ++ SBBQ $3, acc2 ++ CMOVQCS t0, acc0 ++ CMOVQCS t1, acc1 ++ CMOVQCS t2, acc2 ++ ++ // Add in the "s" part of the key ++ ADDQ 0+sStore, acc0 ++ ADCQ 8+sStore, acc1 ++ ++ // Finally, constant time compare to the tag at the end of the message ++ XORQ AX, AX ++ MOVQ $1, DX ++ XORQ (0*8)(inp), acc0 ++ XORQ (1*8)(inp), acc1 ++ ORQ acc1, acc0 ++ CMOVQEQ DX, AX ++ ++ // Return true iff tags are equal ++ MOVB AX, ret+96(FP) ++ RET ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 129 bytes ++openSSE128: ++ // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks ++ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 ++ MOVQ $10, itr2 ++ ++openSSE128InnerCipherLoop: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftB1Left; shiftB2Left ++ shiftC0Left; shiftC1Left; shiftC2Left ++ shiftD0Left; shiftD1Left; shiftD2Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftB1Right; shiftB2Right ++ shiftC0Right; shiftC1Right; shiftC2Right ++ shiftD0Right; shiftD1Right; shiftD2Right ++ DECQ itr2 ++ JNE openSSE128InnerCipherLoop ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 ++ PADDL T2, C1; PADDL T2, C2 ++ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVOU A0, rStore; MOVOU B0, sStore ++ ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++openSSE128Open: ++ CMPQ inl, $16 ++ JB openSSETail16 ++ SUBQ $16, inl ++ ++ // Load for hashing ++ polyAdd(0(inp)) ++ ++ // Load for decryption ++ MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ polyMul ++ ++ // 
Shift the stream "left" ++ MOVO B1, A1 ++ MOVO C1, B1 ++ MOVO D1, C1 ++ MOVO A2, D1 ++ MOVO B2, A2 ++ MOVO C2, B2 ++ MOVO D2, C2 ++ JMP openSSE128Open ++ ++openSSETail16: ++ TESTQ inl, inl ++ JE openSSEFinalize ++ ++ // We can safely load the CT from the end, because it is padded with the MAC ++ MOVQ inl, itr2 ++ SHLQ $4, itr2 ++ LEAQ ·andMask<>(SB), t0 ++ MOVOU (inp), T0 ++ ADDQ inl, inp ++ PAND -16(t0)(itr2*1), T0 ++ MOVO T0, 0+tmpStore ++ MOVQ T0, t0 ++ MOVQ 8+tmpStore, t1 ++ PXOR A1, T0 ++ ++ // We can only store one byte at a time, since plaintext can be shorter than 16 bytes ++openSSETail16Store: ++ MOVQ T0, t3 ++ MOVB t3, (oup) ++ PSRLDQ $1, T0 ++ INCQ oup ++ DECQ inl ++ JNE openSSETail16Store ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ JMP openSSEFinalize ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 64 bytes of ciphertext ++openSSETail64: ++ // Need to decrypt up to 64 bytes - prepare single block ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ CMPQ itr1, $16 ++ JB openSSETail64LoopB ++ ++openSSETail64LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ SUBQ $16, itr1 ++ ++openSSETail64LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ ++ CMPQ itr1, $16 ++ JAE openSSETail64LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSETail64LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 ++ ++openSSETail64DecLoop: ++ CMPQ inl, $16 ++ JB openSSETail64DecLoopDone ++ SUBQ $16, inl ++ MOVOU (inp), T0 ++ PXOR T0, A0 ++ MOVOU A0, (oup) ++ LEAQ 16(inp), inp ++ LEAQ 16(oup), oup ++ MOVO B0, A0 ++ MOVO C0, B0 ++ MOVO D0, C0 ++ JMP openSSETail64DecLoop ++ ++openSSETail64DecLoopDone: ++ MOVO A0, A1 ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++openSSETail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store ++ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ ++openSSETail128LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ++openSSETail128LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ ++ CMPQ itr2, itr1 ++ JB openSSETail128LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSETail128LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B0; PADDL state1Store, B1 ++ PADDL state2Store, C0; PADDL state2Store, C1 ++ PADDL ctr1Store, D0; PADDL ctr0Store, D1 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR 
T2, C1; PXOR T3, D1 ++ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) ++ ++ SUBQ $64, inl ++ LEAQ 64(inp), inp ++ LEAQ 64(oup), oup ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 192 bytes of ciphertext ++openSSETail192: ++ // Need to decrypt up to 192 bytes - prepare three blocks ++ MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store ++ MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store ++ ++ MOVQ inl, itr1 ++ MOVQ $160, itr2 ++ CMPQ itr1, $160 ++ CMOVQGT itr2, itr1 ++ ANDQ $-16, itr1 ++ XORQ itr2, itr2 ++ ++openSSLTail192LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ++openSSLTail192LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ shiftB2Left; shiftC2Left; shiftD2Left ++ ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ shiftB2Right; shiftC2Right; shiftD2Right ++ ++ CMPQ itr2, itr1 ++ JB openSSLTail192LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSLTail192LoopB ++ ++ CMPQ inl, $176 ++ JB openSSLTail192Store ++ ++ polyAdd(160(inp)) ++ polyMul ++ ++ CMPQ inl, $192 ++ JB openSSLTail192Store ++ ++ polyAdd(176(inp)) ++ polyMul ++ ++openSSLTail192Store: ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 ++ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 ++ PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 ++ MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) ++ ++ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ LEAQ 128(oup), oup ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++openSSETail256: ++ // Need to decrypt up to 256 bytes - prepare four blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ XORQ itr2, itr2 ++ ++openSSETail256Loop: ++ // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication ++ 
polyAdd(0(inp)(itr2*1)) ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulStage3 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ ADDQ $2*8, itr2 ++ CMPQ itr2, $160 ++ JB openSSETail256Loop ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ ++openSSETail256HashLoop: ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ADDQ $2*8, itr2 ++ CMPQ itr2, itr1 ++ JB openSSETail256HashLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ MOVO D3, tmpStore ++ ++ // Load - xor - store ++ MOVOU (0*16)(inp), D3; PXOR D3, A0 ++ MOVOU (1*16)(inp), D3; PXOR D3, B0 ++ MOVOU (2*16)(inp), D3; PXOR D3, C0 ++ MOVOU (3*16)(inp), D3; PXOR D3, D0 ++ MOVOU A0, (0*16)(oup) ++ MOVOU B0, (1*16)(oup) ++ MOVOU C0, (2*16)(oup) ++ MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) ++ LEAQ 192(inp), inp ++ LEAQ 192(oup), oup ++ SUBQ $192, inl ++ MOVO A3, A0 ++ MOVO B3, B0 ++ MOVO C3, C0 ++ MOVO tmpStore, D0 ++ ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// ------------------------- AVX2 Code ---------------------------------------- ++chacha20Poly1305Open_AVX2: ++ VZEROUPPER ++ VMOVDQU ·chacha20Constants<>(SB), AA0 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 ++ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 ++ VPADDD ·avx2InitMask<>(SB), DD0, DD0 ++ ++ // Special optimization, for very short buffers ++ CMPQ inl, $192 ++ JBE openAVX2192 ++ CMPQ inl, $320 ++ JBE openAVX2320 ++ ++ // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream ++ VMOVDQA BB0, state1StoreAVX2 ++ VMOVDQA CC0, state2StoreAVX2 ++ VMOVDQA DD0, ctr3StoreAVX2 ++ MOVQ $10, itr2 ++ ++openAVX2PreparePolyKey: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; 
VPALIGNR $12, DD0, DD0, DD0 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ DECQ itr2 ++ JNE openAVX2PreparePolyKey ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0 ++ VPADDD state1StoreAVX2, BB0, BB0 ++ VPADDD state2StoreAVX2, CC0, CC0 ++ VPADDD ctr3StoreAVX2, DD0, DD0 ++ ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for the first 64 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ ++ // Hash AD + first 64 bytes ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++openAVX2InitialHash64: ++ polyAdd(0(inp)(itr1*1)) ++ polyMulAVX2 ++ ADDQ $16, itr1 ++ CMPQ itr1, $64 ++ JNE openAVX2InitialHash64 ++ ++ // Decrypt the first 64 bytes ++ VPXOR (0*32)(inp), AA0, AA0 ++ VPXOR (1*32)(inp), BB0, BB0 ++ VMOVDQU AA0, (0*32)(oup) ++ VMOVDQU BB0, (1*32)(oup) ++ LEAQ (2*32)(inp), inp ++ LEAQ (2*32)(oup), oup ++ SUBQ $64, inl ++ ++openAVX2MainLoop: ++ CMPQ inl, $512 ++ JB openAVX2MainLoopDone ++ ++ // Load state, increment counter blocks, store the incremented counters ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ XORQ itr1, itr1 ++ ++openAVX2InternalLoop: ++ // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications ++ // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext ++ polyAdd(0*8(inp)(itr1*1)) ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage1_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulStage2_AVX2 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyMulStage3_AVX2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ polyAdd(2*8(inp)(itr1*1)) ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage1_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD 
$7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage2_AVX2 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage3_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulReduceStage ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(4*8(inp)(itr1*1)) ++ LEAQ (6*8)(itr1), itr1 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage1_AVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ polyMulStage2_AVX2 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage3_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ CMPQ itr1, $480 ++ JNE openAVX2InternalLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ ++ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here ++ polyAdd(480(inp)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 
$0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ // and here ++ polyAdd(496(inp)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) ++ LEAQ (32*16)(inp), inp ++ LEAQ (32*16)(oup), oup ++ SUBQ $(32*16), inl ++ JMP openAVX2MainLoop ++ ++openAVX2MainLoopDone: ++ // Handle the various tail sizes efficiently ++ TESTQ inl, inl ++ JE openSSEFinalize ++ CMPQ inl, $128 ++ JBE openAVX2Tail128 ++ CMPQ inl, $256 ++ JBE openAVX2Tail256 ++ CMPQ inl, $384 ++ JBE openAVX2Tail384 ++ JMP openAVX2Tail512 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 193 bytes ++openAVX2192: ++ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks ++ VMOVDQA AA0, AA1 ++ VMOVDQA BB0, BB1 ++ VMOVDQA CC0, CC1 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2 ++ VMOVDQA BB0, BB2 ++ VMOVDQA CC0, CC2 ++ VMOVDQA DD0, DD2 ++ VMOVDQA DD1, TT3 ++ MOVQ $10, itr2 ++ ++openAVX2192InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr2 ++ JNE openAVX2192InnerCipherLoop ++ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 ++ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 ++ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 ++ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 192 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ ++openAVX2ShortOpen: ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL 
polyHashADInternal<>(SB) ++ ++openAVX2ShortOpenLoop: ++ CMPQ inl, $32 ++ JB openAVX2ShortTail32 ++ SUBQ $32, inl ++ ++ // Load for hashing ++ polyAdd(0*8(inp)) ++ polyMulAVX2 ++ polyAdd(2*8(inp)) ++ polyMulAVX2 ++ ++ // Load for decryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ LEAQ (1*32)(oup), oup ++ ++ // Shift stream left ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ VMOVDQA AA1, DD0 ++ VMOVDQA BB1, AA1 ++ VMOVDQA CC1, BB1 ++ VMOVDQA DD1, CC1 ++ VMOVDQA AA2, DD1 ++ VMOVDQA BB2, AA2 ++ JMP openAVX2ShortOpenLoop ++ ++openAVX2ShortTail32: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB openAVX2ShortDone ++ ++ SUBQ $16, inl ++ ++ // Load for hashing ++ polyAdd(0*8(inp)) ++ polyMulAVX2 ++ ++ // Load for decryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++openAVX2ShortDone: ++ VZEROUPPER ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 321 bytes ++openAVX2320: ++ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks ++ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 ++ MOVQ $10, itr2 ++ ++openAVX2320InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr2 ++ JNE openAVX2320InnerCipherLoop ++ ++ VMOVDQA ·chacha20Constants<>(SB), TT0 ++ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 ++ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 ++ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 ++ VMOVDQA ·avx2IncMask<>(SB), TT0 ++ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD2, DD2 ++ ++ // Clamp and store poly key ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 320 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ VPERM2I128 $0x02, AA2, BB2, CC1 ++ VPERM2I128 $0x02, CC2, DD2, DD1 ++ VPERM2I128 $0x13, AA2, BB2, AA2 ++ VPERM2I128 $0x13, CC2, DD2, BB2 ++ JMP openAVX2ShortOpen ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++openAVX2Tail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA1 ++ 
VMOVDQA state1StoreAVX2, BB1 ++ VMOVDQA state2StoreAVX2, CC1 ++ VMOVDQA ctr3StoreAVX2, DD1 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD1 ++ VMOVDQA DD1, DD0 ++ ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ TESTQ itr1, itr1 ++ JE openAVX2Tail128LoopB ++ ++openAVX2Tail128LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMulAVX2 ++ ++openAVX2Tail128LoopB: ++ ADDQ $16, itr2 ++ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD1, DD1, DD1 ++ CMPQ itr2, itr1 ++ JB openAVX2Tail128LoopA ++ CMPQ itr2, $160 ++ JNE openAVX2Tail128LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD DD0, DD1, DD1 ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++openAVX2TailLoop: ++ CMPQ inl, $32 ++ JB openAVX2Tail ++ SUBQ $32, inl ++ ++ // Load for decryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ LEAQ (1*32)(oup), oup ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ JMP openAVX2TailLoop ++ ++openAVX2Tail: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB openAVX2TailDone ++ SUBQ $16, inl ++ ++ // Load for decryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++openAVX2TailDone: ++ VZEROUPPER ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++openAVX2Tail256: ++ // Need to decrypt up to 256 bytes - prepare four blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA DD0, TT1 ++ VMOVDQA DD1, TT2 ++ ++ // Compute the number of iterations that will hash data ++ MOVQ inl, tmpStoreAVX2 ++ MOVQ inl, itr1 ++ SUBQ $128, itr1 ++ SHRQ $4, itr1 ++ MOVQ $10, itr2 ++ CMPQ itr1, $10 ++ CMOVQGT itr2, itr1 ++ MOVQ inp, inl ++ XORQ itr2, itr2 ++ ++openAVX2Tail256LoopA: ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ ++ // Perform ChaCha rounds, while hashing the remaining input ++openAVX2Tail256LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ INCQ itr2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ CMPQ itr2, itr1 ++ JB openAVX2Tail256LoopA ++ ++ CMPQ itr2, $10 ++ JNE openAVX2Tail256LoopB ++ ++ MOVQ inl, itr2 ++ SUBQ inp, inl ++ MOVQ inl, itr1 ++ MOVQ tmpStoreAVX2, inl ++ ++ // Hash the remainder of data (if any) ++openAVX2Tail256Hash: ++ ADDQ $16, itr1 ++ CMPQ itr1, inl ++ JGT openAVX2Tail256HashEnd ++ polyAdd (0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ JMP openAVX2Tail256Hash ++ 
++// Store 128 bytes safely, then go to store loop ++openAVX2Tail256HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++ VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 ++ VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) ++ LEAQ (4*32)(inp), inp ++ LEAQ (4*32)(oup), oup ++ SUBQ $4*32, inl ++ ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 384 bytes of ciphertext ++openAVX2Tail384: ++ // Need to decrypt up to 384 bytes - prepare six blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA DD0, ctr0StoreAVX2 ++ VMOVDQA DD1, ctr1StoreAVX2 ++ VMOVDQA DD2, ctr2StoreAVX2 ++ ++ // Compute the number of iterations that will hash two blocks of data ++ MOVQ inl, tmpStoreAVX2 ++ MOVQ inl, itr1 ++ SUBQ $256, itr1 ++ SHRQ $4, itr1 ++ ADDQ $6, itr1 ++ MOVQ $10, itr2 ++ CMPQ itr1, $10 ++ CMOVQGT itr2, itr1 ++ MOVQ inp, inl ++ XORQ itr2, itr2 ++ ++ // Perform ChaCha rounds, while hashing the remaining input ++openAVX2Tail384LoopB: ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ ++openAVX2Tail384LoopA: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ INCQ itr2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ ++ CMPQ itr2, itr1 ++ JB openAVX2Tail384LoopB ++ ++ CMPQ itr2, $10 ++ JNE openAVX2Tail384LoopA ++ ++ MOVQ inl, itr2 ++ SUBQ inp, inl ++ MOVQ inl, itr1 ++ MOVQ tmpStoreAVX2, inl ++ ++openAVX2Tail384Hash: ++ ADDQ $16, itr1 ++ CMPQ itr1, inl ++ JGT openAVX2Tail384HashEnd ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ JMP openAVX2Tail384Hash ++ ++// Store 256 bytes safely, then go to store loop ++openAVX2Tail384HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, 
BB2, BB2 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 ++ VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 ++ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ LEAQ (8*32)(inp), inp ++ LEAQ (8*32)(oup), oup ++ SUBQ $8*32, inl ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 512 bytes of ciphertext ++openAVX2Tail512: ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ XORQ itr1, itr1 ++ MOVQ inp, itr2 ++ ++openAVX2Tail512LoopB: ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ (2*8)(itr2), itr2 ++ ++openAVX2Tail512LoopA: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyAdd(0*8(itr2)) ++ polyMulAVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, 
BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(2*8(itr2)) ++ polyMulAVX2 ++ LEAQ (4*8)(itr2), itr2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ INCQ itr1 ++ CMPQ itr1, $4 ++ JLT openAVX2Tail512LoopB ++ ++ CMPQ itr1, $10 ++ JNE openAVX2Tail512LoopA ++ ++ MOVQ inl, itr1 ++ SUBQ $384, itr1 ++ ANDQ $-16, itr1 ++ ++openAVX2Tail512HashLoop: ++ TESTQ itr1, itr1 ++ JE openAVX2Tail512HashEnd ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ SUBQ $16, itr1 ++ JMP openAVX2Tail512HashLoop ++ ++openAVX2Tail512HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; 
VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ ++ LEAQ (12*32)(inp), inp ++ LEAQ (12*32)(oup), oup ++ SUBQ $12*32, inl ++ ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// ---------------------------------------------------------------------------- ++// func chacha20Poly1305Seal(dst, key, src, ad []byte) ++TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 ++ // For aligned stack access ++ MOVQ SP, BP ++ ADDQ $32, BP ++ ANDQ $-32, BP ++ MOVQ dst+0(FP), oup ++ MOVQ key+24(FP), keyp ++ MOVQ src+48(FP), inp ++ MOVQ src_len+56(FP), inl ++ MOVQ ad+72(FP), adp ++ ++ CMPB ·useAVX2(SB), $1 ++ JE chacha20Poly1305Seal_AVX2 ++ ++ // Special optimization, for very short buffers ++ CMPQ inl, $128 ++ JBE sealSSE128 // About 15% faster ++ ++ // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration ++ MOVOU ·chacha20Constants<>(SB), A0 ++ MOVOU (1*16)(keyp), B0 ++ MOVOU (2*16)(keyp), C0 ++ MOVOU (3*16)(keyp), D0 ++ ++ // Store state on stack for future use ++ MOVO B0, state1Store ++ MOVO C0, state2Store ++ ++ // Load state, increment counter blocks ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ MOVQ $10, itr2 ++ ++sealSSEIntroLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr2 ++ JNE sealSSEIntroLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD 
·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVO A0, rStore ++ MOVO B0, sStore ++ ++ // Hash AAD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) ++ ++ MOVQ $128, itr1 ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ ++ MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 ++ ++ CMPQ inl, $64 ++ JBE sealSSE128SealHash ++ ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 ++ MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) ++ ++ ADDQ $64, itr1 ++ SUBQ $64, inl ++ LEAQ 64(inp), inp ++ ++ MOVQ $2, itr1 ++ MOVQ $8, itr2 ++ ++ CMPQ inl, $64 ++ JBE sealSSETail64 ++ CMPQ inl, $128 ++ JBE sealSSETail128 ++ CMPQ inl, $192 ++ JBE sealSSETail192 ++ ++sealSSEMainLoop: ++ // Load state, increment counter blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ ++sealSSEInnerLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyAdd(0(oup)) ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ LEAQ (2*8)(oup), oup ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ polyMulStage3 ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr2 ++ JGE sealSSEInnerLoop ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ (2*8)(oup), oup ++ DECQ itr1 ++ JG sealSSEInnerLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD 
ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ MOVO D3, tmpStore ++ ++ // Load - xor - store ++ MOVOU (0*16)(inp), D3; PXOR D3, A0 ++ MOVOU (1*16)(inp), D3; PXOR D3, B0 ++ MOVOU (2*16)(inp), D3; PXOR D3, C0 ++ MOVOU (3*16)(inp), D3; PXOR D3, D0 ++ MOVOU A0, (0*16)(oup) ++ MOVOU B0, (1*16)(oup) ++ MOVOU C0, (2*16)(oup) ++ MOVOU D0, (3*16)(oup) ++ MOVO tmpStore, D3 ++ ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) ++ ADDQ $192, inp ++ MOVQ $192, itr1 ++ SUBQ $192, inl ++ MOVO A3, A1 ++ MOVO B3, B1 ++ MOVO C3, C1 ++ MOVO D3, D1 ++ CMPQ inl, $64 ++ JBE sealSSE128SealHash ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 ++ MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) ++ LEAQ 64(inp), inp ++ SUBQ $64, inl ++ MOVQ $6, itr1 ++ MOVQ $4, itr2 ++ CMPQ inl, $192 ++ JG sealSSEMainLoop ++ ++ MOVQ inl, itr1 ++ TESTQ inl, inl ++ JE sealSSE128SealHash ++ MOVQ $6, itr1 ++ CMPQ inl, $64 ++ JBE sealSSETail64 ++ CMPQ inl, $128 ++ JBE sealSSETail128 ++ JMP sealSSETail192 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 64 bytes of plaintext ++sealSSETail64: ++ // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A1 ++ MOVO state1Store, B1 ++ MOVO state2Store, C1 ++ MOVO ctr3Store, D1 ++ PADDL ·sseIncMask<>(SB), D1 ++ MOVO D1, ctr0Store ++ ++sealSSETail64LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail64LoopB: ++ chachaQR(A1, B1, C1, D1, T1) ++ shiftB1Left; shiftC1Left; shiftD1Left ++ chachaQR(A1, B1, C1, D1, T1) ++ shiftB1Right; shiftC1Right; shiftD1Right ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++ DECQ itr1 ++ JG sealSSETail64LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail64LoopB ++ PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B1 ++ PADDL state2Store, C1 ++ PADDL ctr0Store, D1 ++ ++ JMP sealSSE128Seal ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of plaintext ++sealSSETail128: ++ // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ ++sealSSETail128LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail128LoopB: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right 
++ shiftB1Right; shiftC1Right; shiftD1Right ++ ++ DECQ itr1 ++ JG sealSSETail128LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail128LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B0; PADDL state1Store, B1 ++ PADDL state2Store, C0; PADDL state2Store, C1 ++ PADDL ctr0Store, D0; PADDL ctr1Store, D1 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 ++ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) ++ ++ MOVQ $64, itr1 ++ LEAQ 64(inp), inp ++ SUBQ $64, inl ++ ++ JMP sealSSE128SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 192 bytes of plaintext ++sealSSETail192: ++ // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store ++ ++sealSSETail192LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail192LoopB: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ shiftB2Left; shiftC2Left; shiftD2Left ++ ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ shiftB2Right; shiftC2Right; shiftD2Right ++ ++ DECQ itr1 ++ JG sealSSETail192LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail192LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 ++ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 ++ PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 ++ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ ++ MOVO A2, A1 ++ MOVO B2, B1 ++ MOVO C2, C1 ++ MOVO D2, D1 ++ MOVQ $128, itr1 ++ LEAQ 128(inp), inp ++ SUBQ $128, inl ++ ++ JMP sealSSE128SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special seal optimization for buffers smaller than 129 bytes ++sealSSE128: ++ // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks ++ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ 
MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 ++ MOVQ $10, itr2 ++ ++sealSSE128InnerCipherLoop: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftB1Left; shiftB2Left ++ shiftC0Left; shiftC1Left; shiftC2Left ++ shiftD0Left; shiftD1Left; shiftD2Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftB1Right; shiftB2Right ++ shiftC0Right; shiftC1Right; shiftC2Right ++ shiftD0Right; shiftD1Right; shiftD2Right ++ DECQ itr2 ++ JNE sealSSE128InnerCipherLoop ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 ++ PADDL T2, C1; PADDL T2, C2 ++ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 ++ PAND ·polyClampMask<>(SB), A0 ++ MOVOU A0, rStore ++ MOVOU B0, sStore ++ ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++sealSSE128SealHash: ++ // itr1 holds the number of bytes encrypted but not yet hashed ++ CMPQ itr1, $16 ++ JB sealSSE128Seal ++ polyAdd(0(oup)) ++ polyMul ++ ++ SUBQ $16, itr1 ++ ADDQ $16, oup ++ ++ JMP sealSSE128SealHash ++ ++sealSSE128Seal: ++ CMPQ inl, $16 ++ JB sealSSETail ++ SUBQ $16, inl ++ ++ // Load for decryption ++ MOVOU (inp), T0 ++ PXOR T0, A1 ++ MOVOU A1, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ ++ // Extract for hashing ++ MOVQ A1, t0 ++ PSRLDQ $8, A1 ++ MOVQ A1, t1 ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Shift the stream "left" ++ MOVO B1, A1 ++ MOVO C1, B1 ++ MOVO D1, C1 ++ MOVO A2, D1 ++ MOVO B2, A2 ++ MOVO C2, B2 ++ MOVO D2, C2 ++ JMP sealSSE128Seal ++ ++sealSSETail: ++ TESTQ inl, inl ++ JE sealSSEFinalize ++ ++ // We can only load the PT one byte at a time to avoid read after end of buffer ++ MOVQ inl, itr2 ++ SHLQ $4, itr2 ++ LEAQ ·andMask<>(SB), t0 ++ MOVQ inl, itr1 ++ LEAQ -1(inp)(inl*1), inp ++ XORQ t2, t2 ++ XORQ t3, t3 ++ XORQ AX, AX ++ ++sealSSETailLoadLoop: ++ SHLQ $8, t2, t3 ++ SHLQ $8, t2 ++ MOVB (inp), AX ++ XORQ AX, t2 ++ LEAQ -1(inp), inp ++ DECQ itr1 ++ JNE sealSSETailLoadLoop ++ MOVQ t2, 0+tmpStore ++ MOVQ t3, 8+tmpStore ++ PXOR 0+tmpStore, A1 ++ MOVOU A1, (oup) ++ MOVOU -16(t0)(itr2*1), T0 ++ PAND T0, A1 ++ MOVQ A1, t0 ++ PSRLDQ $8, A1 ++ MOVQ A1, t1 ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ ADDQ inl, oup ++ ++sealSSEFinalize: ++ // Hash in the buffer lengths ++ ADDQ ad_len+80(FP), acc0 ++ ADCQ src_len+56(FP), acc1 ++ ADCQ $1, acc2 ++ polyMul ++ ++ // Final reduce ++ MOVQ acc0, t0 ++ MOVQ acc1, t1 ++ MOVQ acc2, t2 ++ SUBQ $-5, acc0 ++ SBBQ $-1, acc1 ++ SBBQ $3, acc2 ++ CMOVQCS t0, acc0 ++ CMOVQCS t1, acc1 ++ CMOVQCS t2, acc2 ++ ++ // Add in the "s" part of the key ++ ADDQ 0+sStore, acc0 ++ ADCQ 8+sStore, acc1 ++ ++ // Finally store the tag at the end of the message ++ MOVQ acc0, (0*8)(oup) ++ MOVQ acc1, (1*8)(oup) ++ RET ++ ++// ---------------------------------------------------------------------------- ++// ------------------------- AVX2 Code ---------------------------------------- ++chacha20Poly1305Seal_AVX2: ++ VZEROUPPER ++ VMOVDQU ·chacha20Constants<>(SB), AA0 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 ++ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // 
broadcasti128 48(r8), ymm4 ++ VPADDD ·avx2InitMask<>(SB), DD0, DD0 ++ ++ // Special optimizations, for very short buffers ++ CMPQ inl, $192 ++ JBE seal192AVX2 // 33% faster ++ CMPQ inl, $320 ++ JBE seal320AVX2 // 17% faster ++ ++ // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream ++ VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 ++ VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 ++ VMOVDQA DD3, ctr3StoreAVX2 ++ MOVQ $10, itr2 ++ ++sealAVX2IntroLoop: ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 ++ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 ++ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 ++ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 ++ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 ++ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 ++ DECQ itr2 ++ JNE sealAVX2IntroLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ ++ VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 ++ VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key ++ VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), DD0, DD0 ++ VMOVDQA DD0, rsStoreAVX2 ++ ++ // Hash AD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++ // Can store at least 320 bytes ++ VPXOR (0*32)(inp), AA0, AA0 ++ VPXOR (1*32)(inp), CC0, CC0 ++ VMOVDQU AA0, (0*32)(oup) ++ VMOVDQU CC0, (1*32)(oup) ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, 
(4*32)(oup); VMOVDQU DD0, (5*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) ++ ++ MOVQ $320, itr1 ++ SUBQ $320, inl ++ LEAQ 320(inp), inp ++ ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 ++ CMPQ inl, $128 ++ JBE sealAVX2SealHash ++ ++ VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ ++ MOVQ $8, itr1 ++ MOVQ $2, itr2 ++ ++ CMPQ inl, $128 ++ JBE sealAVX2Tail128 ++ CMPQ inl, $256 ++ JBE sealAVX2Tail256 ++ CMPQ inl, $384 ++ JBE sealAVX2Tail384 ++ CMPQ inl, $512 ++ JBE sealAVX2Tail512 ++ ++ // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 ++ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 ++ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 ++ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 ++ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 ++ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA 
CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ ++ SUBQ $16, oup // Adjust the pointer ++ MOVQ $9, itr1 ++ JMP sealAVX2InternalLoopStart ++ ++sealAVX2MainLoop: ++ // Load state, increment counter blocks, store the incremented counters ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ MOVQ $10, itr1 ++ ++sealAVX2InternalLoop: ++ polyAdd(0*8(oup)) ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage1_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulStage2_AVX2 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyMulStage3_AVX2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ ++sealAVX2InternalLoopStart: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ polyAdd(2*8(oup)) ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage1_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage2_AVX2 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage3_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB 
·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulReduceStage ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(4*8(oup)) ++ LEAQ (6*8)(oup), oup ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage1_AVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ polyMulStage2_AVX2 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage3_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ DECQ itr1 ++ JNE sealAVX2InternalLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ ++ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ LEAQ (4*8)(oup), oup ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ // and here ++ polyAdd(-2*8(oup)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, 
DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) ++ LEAQ (32*16)(inp), inp ++ SUBQ $(32*16), inl ++ CMPQ inl, $512 ++ JG sealAVX2MainLoop ++ ++ // Tail can only hash 480 bytes ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ 32(oup), oup ++ ++ MOVQ $10, itr1 ++ MOVQ $0, itr2 ++ CMPQ inl, $128 ++ JBE sealAVX2Tail128 ++ CMPQ inl, $256 ++ JBE sealAVX2Tail256 ++ CMPQ inl, $384 ++ JBE sealAVX2Tail384 ++ JMP sealAVX2Tail512 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 193 bytes ++seal192AVX2: ++ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks ++ VMOVDQA AA0, AA1 ++ VMOVDQA BB0, BB1 ++ VMOVDQA CC0, CC1 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2 ++ VMOVDQA BB0, BB2 ++ VMOVDQA CC0, CC2 ++ VMOVDQA DD0, DD2 ++ VMOVDQA DD1, TT3 ++ MOVQ $10, itr2 ++ ++sealAVX2192InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr2 ++ JNE sealAVX2192InnerCipherLoop ++ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 ++ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 ++ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 ++ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 192 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ ++sealAVX2ShortSeal: ++ // Hash aad ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++sealAVX2SealHash: ++ // itr1 holds the number of bytes encrypted but not yet hashed ++ CMPQ itr1, $16 ++ JB sealAVX2ShortSealLoop ++ polyAdd(0(oup)) ++ polyMul ++ SUBQ $16, itr1 ++ ADDQ $16, oup ++ JMP sealAVX2SealHash ++ ++sealAVX2ShortSealLoop: ++ CMPQ inl, $32 ++ JB sealAVX2ShortTail32 ++ SUBQ $32, inl ++ ++ // Load for encryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ ++ // Now can hash ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ (1*32)(oup), oup ++ ++ // Shift stream left ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ VMOVDQA AA1, DD0 ++ VMOVDQA BB1, AA1 ++ VMOVDQA CC1, BB1 ++ VMOVDQA DD1, CC1 ++ VMOVDQA AA2, DD1 ++ VMOVDQA BB2, AA2 ++ JMP sealAVX2ShortSealLoop ++ 
++sealAVX2ShortTail32: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB sealAVX2ShortDone ++ ++ SUBQ $16, inl ++ ++ // Load for encryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ ++ // Hash ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++sealAVX2ShortDone: ++ VZEROUPPER ++ JMP sealSSETail ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 321 bytes ++seal320AVX2: ++ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks ++ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 ++ MOVQ $10, itr2 ++ ++sealAVX2320InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr2 ++ JNE sealAVX2320InnerCipherLoop ++ ++ VMOVDQA ·chacha20Constants<>(SB), TT0 ++ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 ++ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 ++ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 ++ VMOVDQA ·avx2IncMask<>(SB), TT0 ++ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD2, DD2 ++ ++ // Clamp and store poly key ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 320 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ VPERM2I128 $0x02, AA2, BB2, CC1 ++ VPERM2I128 $0x02, CC2, DD2, DD1 ++ VPERM2I128 $0x13, AA2, BB2, AA2 ++ VPERM2I128 $0x13, CC2, DD2, BB2 ++ JMP sealAVX2ShortSeal ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++sealAVX2Tail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0 ++ VMOVDQA state1StoreAVX2, BB0 ++ VMOVDQA state2StoreAVX2, CC0 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VMOVDQA DD0, DD1 ++ ++sealAVX2Tail128LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail128LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0 ++ VPALIGNR $8, CC0, CC0, CC0 ++ VPALIGNR $12, 
DD0, DD0, DD0 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0 ++ VPALIGNR $8, CC0, CC0, CC0 ++ VPALIGNR $4, DD0, DD0, DD0 ++ DECQ itr1 ++ JG sealAVX2Tail128LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail128LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA1 ++ VPADDD state1StoreAVX2, BB0, BB1 ++ VPADDD state2StoreAVX2, CC0, CC1 ++ VPADDD DD1, DD0, DD1 ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ JMP sealAVX2ShortSealLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++sealAVX2Tail256: ++ // Need to decrypt up to 256 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA DD0, TT1 ++ VMOVDQA DD1, TT2 ++ ++sealAVX2Tail256LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail256LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr1 ++ JG sealAVX2Tail256LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail256LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPERM2I128 $0x02, CC0, DD0, TT1 ++ VPERM2I128 $0x13, AA0, BB0, TT2 ++ VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ MOVQ $128, itr1 ++ LEAQ 128(inp), inp ++ SUBQ $128, inl ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++ JMP sealAVX2SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 384 bytes of ciphertext ++sealAVX2Tail384: ++ // Need to decrypt up to 384 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, 
BB1; VMOVDQA BB0, BB2 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 ++ ++sealAVX2Tail384LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail384LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr1 ++ JG sealAVX2Tail384LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail384LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPERM2I128 $0x02, CC0, DD0, TT1 ++ VPERM2I128 $0x13, AA0, BB0, TT2 ++ VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, TT0 ++ VPERM2I128 $0x02, CC1, DD1, TT1 ++ VPERM2I128 $0x13, AA1, BB1, TT2 ++ VPERM2I128 $0x13, CC1, DD1, TT3 ++ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) ++ MOVQ $256, itr1 ++ LEAQ 256(inp), inp ++ SUBQ $256, inl ++ VPERM2I128 $0x02, AA2, BB2, AA0 ++ VPERM2I128 $0x02, CC2, DD2, BB0 ++ VPERM2I128 $0x13, AA2, BB2, CC0 ++ VPERM2I128 $0x13, CC2, DD2, DD0 ++ ++ JMP sealAVX2SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 512 bytes of ciphertext ++sealAVX2Tail512: ++ // Need to decrypt up to 512 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ 
VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ ++sealAVX2Tail512LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail512LoopB: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ (4*8)(oup), oup ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ 
VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ ++ DECQ itr1 ++ JG sealAVX2Tail512LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail512LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3 ++ VPXOR (0*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (0*32)(oup) ++ VPERM2I128 $0x02, CC0, DD0, CC3 ++ VPXOR (1*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (1*32)(oup) ++ VPERM2I128 $0x13, AA0, BB0, CC3 ++ VPXOR (2*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (2*32)(oup) ++ VPERM2I128 $0x13, CC0, DD0, CC3 ++ VPXOR (3*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (3*32)(oup) ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ VPERM2I128 $0x02, AA2, BB2, AA0 ++ VPERM2I128 $0x02, CC2, DD2, BB0 ++ VPERM2I128 $0x13, AA2, BB2, CC0 ++ VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ ++ MOVQ $384, itr1 ++ LEAQ 384(inp), inp ++ SUBQ $384, inl ++ VPERM2I128 $0x02, AA3, BB3, AA0 ++ VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 ++ VPERM2I128 $0x13, AA3, BB3, CC0 ++ VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ ++ JMP sealAVX2SealHash +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +new file mode 100644 +index 00000000000..6313898f0a7 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +@@ -0,0 +1,81 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package chacha20poly1305 ++ ++import ( ++ "encoding/binary" ++ ++ "golang.org/x/crypto/chacha20" ++ "golang.org/x/crypto/internal/alias" ++ "golang.org/x/crypto/internal/poly1305" ++) ++ ++func writeWithPadding(p *poly1305.MAC, b []byte) { ++ p.Write(b) ++ if rem := len(b) % 16; rem != 0 { ++ var buf [16]byte ++ padLen := 16 - rem ++ p.Write(buf[:padLen]) ++ } ++} ++ ++func writeUint64(p *poly1305.MAC, n int) { ++ var buf [8]byte ++ binary.LittleEndian.PutUint64(buf[:], uint64(n)) ++ p.Write(buf[:]) ++} ++ ++func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { ++ ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) ++ ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] ++ if alias.InexactOverlap(out, plaintext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ ++ var polyKey [32]byte ++ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) ++ s.XORKeyStream(polyKey[:], polyKey[:]) ++ s.SetCounter(1) // set the counter to 1, skipping 32 bytes ++ s.XORKeyStream(ciphertext, plaintext) ++ ++ p := poly1305.New(&polyKey) ++ writeWithPadding(p, additionalData) ++ writeWithPadding(p, ciphertext) ++ writeUint64(p, len(additionalData)) ++ writeUint64(p, len(plaintext)) ++ p.Sum(tag[:0]) ++ ++ return ret ++} ++ ++func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ tag := ciphertext[len(ciphertext)-16:] ++ ciphertext = ciphertext[:len(ciphertext)-16] ++ ++ var polyKey [32]byte ++ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) ++ s.XORKeyStream(polyKey[:], polyKey[:]) ++ s.SetCounter(1) // set the counter to 1, skipping 32 bytes ++ ++ p := poly1305.New(&polyKey) ++ writeWithPadding(p, additionalData) ++ writeWithPadding(p, ciphertext) ++ writeUint64(p, len(additionalData)) ++ writeUint64(p, len(ciphertext)) ++ ++ ret, out := sliceForAppend(dst, len(ciphertext)) ++ if alias.InexactOverlap(out, ciphertext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ if !p.Verify(tag) { ++ for i := range out { ++ out[i] = 0 ++ } ++ return nil, errOpen ++ } ++ ++ s.XORKeyStream(out, ciphertext) ++ return ret, nil ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +new file mode 100644 +index 00000000000..34e6ab1df88 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +@@ -0,0 +1,15 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build !amd64 || !gc || purego ++ ++package chacha20poly1305 ++ ++func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ return c.sealGeneric(dst, nonce, plaintext, additionalData) ++} ++ ++func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ return c.openGeneric(dst, nonce, ciphertext, additionalData) ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +new file mode 100644 +index 00000000000..1cebfe946f4 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +@@ -0,0 +1,86 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package chacha20poly1305 ++ ++import ( ++ "crypto/cipher" ++ "errors" ++ ++ "golang.org/x/crypto/chacha20" ++) ++ ++type xchacha20poly1305 struct { ++ key [KeySize]byte ++} ++ ++// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. ++// ++// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, ++// suitable to be generated randomly without risk of collisions. It should be ++// preferred when nonce uniqueness cannot be trivially ensured, or whenever ++// nonces are randomly generated. ++func NewX(key []byte) (cipher.AEAD, error) { ++ if len(key) != KeySize { ++ return nil, errors.New("chacha20poly1305: bad key length") ++ } ++ ret := new(xchacha20poly1305) ++ copy(ret.key[:], key) ++ return ret, nil ++} ++ ++func (*xchacha20poly1305) NonceSize() int { ++ return NonceSizeX ++} ++ ++func (*xchacha20poly1305) Overhead() int { ++ return Overhead ++} ++ ++func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if len(nonce) != NonceSizeX { ++ panic("chacha20poly1305: bad nonce length passed to Seal") ++ } ++ ++ // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no ++ // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, ++ // the second half of the counter is not available. This is unlikely to be ++ // an issue because the cipher.AEAD API requires the entire message to be in ++ // memory, and the counter overflows at 256 GB. ++ if uint64(len(plaintext)) > (1<<38)-64 { ++ panic("chacha20poly1305: plaintext too large") ++ } ++ ++ c := new(chacha20poly1305) ++ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) ++ copy(c.key[:], hKey) ++ ++ // The first 4 bytes of the final nonce are unused counter space. ++ cNonce := make([]byte, NonceSize) ++ copy(cNonce[4:12], nonce[16:24]) ++ ++ return c.seal(dst, cNonce[:], plaintext, additionalData) ++} ++ ++func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if len(nonce) != NonceSizeX { ++ panic("chacha20poly1305: bad nonce length passed to Open") ++ } ++ if len(ciphertext) < 16 { ++ return nil, errOpen ++ } ++ if uint64(len(ciphertext)) > (1<<38)-48 { ++ panic("chacha20poly1305: ciphertext too large") ++ } ++ ++ c := new(chacha20poly1305) ++ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) ++ copy(c.key[:], hKey) ++ ++ // The first 4 bytes of the final nonce are unused counter space. ++ cNonce := make([]byte, NonceSize) ++ copy(cNonce[4:12], nonce[16:24]) ++ ++ return c.open(dst, cNonce[:], ciphertext, additionalData) ++} +diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go +new file mode 100644 +index 00000000000..f4ded5fee2f +--- /dev/null ++++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go +@@ -0,0 +1,95 @@ ++// Copyright 2014 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation ++// Function (HKDF) as defined in RFC 5869. ++// ++// HKDF is a cryptographic key derivation function (KDF) with the goal of ++// expanding limited input keying material into one or more cryptographically ++// strong secret keys. ++package hkdf // import "golang.org/x/crypto/hkdf" ++ ++import ( ++ "crypto/hmac" ++ "errors" ++ "hash" ++ "io" ++) ++ ++// Extract generates a pseudorandom key for use with Expand from an input secret ++// and an optional independent salt. 
++// ++// Only use this function if you need to reuse the extracted key with multiple ++// Expand invocations and different context values. Most common scenarios, ++// including the generation of multiple keys, should use New instead. ++func Extract(hash func() hash.Hash, secret, salt []byte) []byte { ++ if salt == nil { ++ salt = make([]byte, hash().Size()) ++ } ++ extractor := hmac.New(hash, salt) ++ extractor.Write(secret) ++ return extractor.Sum(nil) ++} ++ ++type hkdf struct { ++ expander hash.Hash ++ size int ++ ++ info []byte ++ counter byte ++ ++ prev []byte ++ buf []byte ++} ++ ++func (f *hkdf) Read(p []byte) (int, error) { ++ // Check whether enough data can be generated ++ need := len(p) ++ remains := len(f.buf) + int(255-f.counter+1)*f.size ++ if remains < need { ++ return 0, errors.New("hkdf: entropy limit reached") ++ } ++ // Read any leftover from the buffer ++ n := copy(p, f.buf) ++ p = p[n:] ++ ++ // Fill the rest of the buffer ++ for len(p) > 0 { ++ if f.counter > 1 { ++ f.expander.Reset() ++ } ++ f.expander.Write(f.prev) ++ f.expander.Write(f.info) ++ f.expander.Write([]byte{f.counter}) ++ f.prev = f.expander.Sum(f.prev[:0]) ++ f.counter++ ++ ++ // Copy the new batch into p ++ f.buf = f.prev ++ n = copy(p, f.buf) ++ p = p[n:] ++ } ++ // Save leftovers for next run ++ f.buf = f.buf[n:] ++ ++ return need, nil ++} ++ ++// Expand returns a Reader, from which keys can be read, using the given ++// pseudorandom key and optional context info, skipping the extraction step. ++// ++// The pseudorandomKey should have been generated by Extract, or be a uniformly ++// random or pseudorandom cryptographically strong key. See RFC 5869, Section ++// 3.3. Most common scenarios will want to use New instead. ++func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { ++ expander := hmac.New(hash, pseudorandomKey) ++ return &hkdf{expander, expander.Size(), info, 1, nil, nil} ++} ++ ++// New returns a Reader, from which keys can be read, using the given hash, ++// secret, salt and context info. Salt and info can be nil. ++func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { ++ prk := Extract(hash, secret, salt) ++ return Expand(hash, prk, info) ++} +diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go +index 16c6c6b90ce..e61587945b0 100644 +--- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go ++++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build appengine +-// +build appengine + + // This file applies to App Engine first generation runtimes (<= Go 1.9). + +diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +index a7e27b3d299..9c79aa0a0cc 100644 +--- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go ++++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !appengine +-// +build !appengine + + // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. 
+ +diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go +index b3e8783cc59..2cf71f0f93f 100644 +--- a/vendor/golang.org/x/oauth2/google/default.go ++++ b/vendor/golang.org/x/oauth2/google/default.go +@@ -8,7 +8,6 @@ import ( + "context" + "encoding/json" + "fmt" +- "io/ioutil" + "net/http" + "os" + "path/filepath" +@@ -142,10 +141,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar + + // Second, try a well-known file. + filename := wellKnownFile() +- if creds, err := readCredentialsFile(ctx, filename, params); err == nil { +- return creds, nil +- } else if !os.IsNotExist(err) { +- return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) ++ if b, err := os.ReadFile(filename); err == nil { ++ return CredentialsFromJSONWithParams(ctx, b, params) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) +@@ -231,7 +228,7 @@ func wellKnownFile() string { + } + + func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { +- b, err := ioutil.ReadFile(filename) ++ b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } +diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go +index e1755d1d9ac..d28140f789e 100644 +--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go ++++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build appengine +-// +build appengine + + package internal + +diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go +index c0ab196cf46..14989beaf49 100644 +--- a/vendor/golang.org/x/oauth2/internal/oauth2.go ++++ b/vendor/golang.org/x/oauth2/internal/oauth2.go +@@ -14,7 +14,7 @@ import ( + + // ParseKey converts the binary contents of a private key file + // to an *rsa.PrivateKey. It detects whether the private key is in a +-// PEM container or not. If so, it extracts the the private key ++// PEM container or not. If so, it extracts the private key + // from PEM container before conversion. It only supports PEM + // containers with no passphrase. + func ParseKey(key []byte) (*rsa.PrivateKey, error) { +diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go +index b4723fcacea..58901bda53e 100644 +--- a/vendor/golang.org/x/oauth2/internal/token.go ++++ b/vendor/golang.org/x/oauth2/internal/token.go +@@ -55,12 +55,18 @@ type Token struct { + } + + // tokenJSON is the struct representing the HTTP response from OAuth2 +-// providers returning a token in JSON form. ++// providers returning a token or error in JSON form. 
++// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 + type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number ++ // error fields ++ // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 ++ ErrorCode string `json:"error"` ++ ErrorDescription string `json:"error_description"` ++ ErrorURI string `json:"error_uri"` + } + + func (e *tokenJSON) expiry() (t time.Time) { +@@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } +- if code := r.StatusCode; code < 200 || code > 299 { +- return nil, &RetrieveError{ +- Response: r, +- Body: body, +- } ++ ++ failureStatus := r.StatusCode < 200 || r.StatusCode > 299 ++ retrieveError := &RetrieveError{ ++ Response: r, ++ Body: body, ++ // attempt to populate error detail below + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": ++ // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { +- return nil, err ++ if failureStatus { ++ return nil, retrieveError ++ } ++ return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) + } ++ retrieveError.ErrorCode = vals.Get("error") ++ retrieveError.ErrorDescription = vals.Get("error_description") ++ retrieveError.ErrorURI = vals.Get("error_uri") + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), +@@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { +- return nil, err ++ if failureStatus { ++ return nil, retrieveError ++ } ++ return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) + } ++ retrieveError.ErrorCode = tj.ErrorCode ++ retrieveError.ErrorDescription = tj.ErrorDescription ++ retrieveError.ErrorURI = tj.ErrorURI + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, +@@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } ++ // according to spec, servers should respond status 400 in error case ++ // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 ++ // but some unorthodox servers respond 200 in error case ++ if failureStatus || retrieveError.ErrorCode != "" { ++ return nil, retrieveError ++ } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil + } + ++// mirrors oauth2.RetrieveError + type RetrieveError struct { +- Response *http.Response +- Body []byte ++ Response *http.Response ++ Body []byte ++ ErrorCode string ++ ErrorDescription string ++ ErrorURI string + } + + func (r *RetrieveError) Error() string { ++ if r.ErrorCode != "" { ++ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) ++ if r.ErrorDescription != "" { ++ s += fmt.Sprintf(" %q", r.ErrorDescription) ++ } ++ if r.ErrorURI != "" { ++ s += fmt.Sprintf(" %q", r.ErrorURI) ++ } ++ return s ++ } + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) + } +diff --git a/vendor/golang.org/x/oauth2/token.go 
b/vendor/golang.org/x/oauth2/token.go +index 7c64006de69..5ffce9764be 100644 +--- a/vendor/golang.org/x/oauth2/token.go ++++ b/vendor/golang.org/x/oauth2/token.go +@@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) + } + + // RetrieveError is the error returned when the token endpoint returns a +-// non-2XX HTTP status code. ++// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. ++// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. + Body []byte ++ // ErrorCode is RFC 6749's 'error' parameter. ++ ErrorCode string ++ // ErrorDescription is RFC 6749's 'error_description' parameter. ++ ErrorDescription string ++ // ErrorURI is RFC 6749's 'error_uri' parameter. ++ ErrorURI string + } + + func (r *RetrieveError) Error() string { ++ if r.ErrorCode != "" { ++ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) ++ if r.ErrorDescription != "" { ++ s += fmt.Sprintf(" %q", r.ErrorDescription) ++ } ++ if r.ErrorURI != "" { ++ s += fmt.Sprintf(" %q", r.ErrorURI) ++ } ++ return s ++ } + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) + } +diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +index b1361722dca..c704bad622a 100644 +--- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +@@ -5339,7 +5339,7 @@ + ], + "parameters": { + "ipAddress": { +- "description": "The ip_address could be external IPv4, or internal IPv4 within IPv6 form of virtual_network_id with internal IPv4. IPv6 is not supported yet.", ++ "description": "The VM IP address.", + "location": "query", + "type": "string" + }, +@@ -9429,6 +9429,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use instanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -12543,6 +12544,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.instances.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instance" ++ ], ++ "parameters": { ++ "instance": { ++ "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "Name of the zone scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", ++ "request": { ++ "$ref": "InstancesSetSecurityPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setServiceAccount": { + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", +@@ -16301,6 +16351,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "patch": { ++ "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ "httpMethod": "PATCH", ++ "id": "compute.networkAttachments.patch", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkAttachment" ++ ], ++ "parameters": { ++ "networkAttachment": { ++ "description": "Name of the NetworkAttachment resource to patch.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ "request": { ++ "$ref": "NetworkAttachment" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +@@ -22849,6 +22949,100 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "calculateCancellationFee": { ++ "description": "Calculate cancellation fee for the specified commitment.", ++ "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ "httpMethod": "POST", ++ "id": "compute.regionCommitments.calculateCancellationFee", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "commitment" ++ ], ++ "parameters": { ++ "commitment": { ++ "description": "Name of the commitment to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "cancel": { ++ "description": "Cancel the specified commitment.", ++ "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ "httpMethod": "POST", ++ "id": "compute.regionCommitments.cancel", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "commitment" ++ ], ++ "parameters": { ++ "commitment": { ++ "description": "Name of the commitment to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "get": { + "description": "Returns the specified commitment resource.", + "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", +@@ -25729,6 +25923,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use regionInstanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -28079,6 +28274,56 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "patchAssociation": { ++ "description": "Updates an association for the specified network firewall policy.", ++ "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ "httpMethod": "POST", ++ "id": "compute.regionNetworkFirewallPolicies.patchAssociation", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "firewallPolicy" ++ ], ++ "parameters": { ++ "firewallPolicy": { ++ "description": "Name of the firewall policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ "request": { ++ "$ref": "FirewallPolicyAssociation" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "patchRule": { + "description": "Patches a rule of the specified priority.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", +@@ -32495,6 +32740,53 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "getNatIpInfo": { ++ "description": "Retrieves runtime NAT IP information.", ++ "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ "httpMethod": "GET", ++ "id": "compute.routers.getNatIpInfo", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "router" ++ ], ++ "parameters": { ++ "natName": { ++ "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. 
Name should conform to RFC1035.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "router": { ++ "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ "response": { ++ "$ref": "NatIpInfoResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, + "getNatMappingInfo": { + "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", +@@ -34034,11 +34326,6 @@ + "required": true, + "type": "string" + }, +- "reconcileConnections": { +- "description": "This flag determines how to change the status of consumer connections, when the connection policy for the corresponding project or network is modified. If the flag is false, the default case, then existing ACCEPTED and REJECTED consumer connections stay in that state. For example, even if the project is removed from the accept list, existing ACCEPTED connections will stay the same. If the flag is true, then the connection can change from ACCEPTED or REJECTED to pending when the connection policy is modified. For example, if a project is removed from the reject list, its existing REJECTED connections will move to the PENDING state. If the project is also added to the accept list, then those connections will move to the ACCEPTED state.", +- "location": "query", +- "type": "boolean" +- }, + "region": { + "description": "The region scoping this request and should conform to RFC1035.", + "location": "path", +@@ -35106,13 +35393,13 @@ + } + } + }, +- "subnetworks": { ++ "storagePools": { + "methods": { + "aggregatedList": { +- "description": "Retrieves an aggregated list of subnetworks.", +- "flatPath": "projects/{project}/aggregated/subnetworks", ++ "description": "Retrieves an aggregated list of storage pools.", ++ "flatPath": "projects/{project}/aggregated/storagePools", + "httpMethod": "GET", +- "id": "compute.subnetworks.aggregatedList", ++ "id": "compute.storagePools.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -35158,9 +35445,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/aggregated/subnetworks", ++ "path": "projects/{project}/aggregated/storagePools", + "response": { +- "$ref": "SubnetworkAggregatedList" ++ "$ref": "StoragePoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -35169,14 +35456,14 @@ + ] + }, + "delete": { +- "description": "Deletes the specified subnetwork.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "description": "Deletes the specified storage pool. Deleting a storagePool removes its data permanently and is irreversible. 
However, deleting a storagePool does not delete any snapshots previously made from the storagePool. You must separately delete snapshots.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "httpMethod": "DELETE", +- "id": "compute.subnetworks.delete", ++ "id": "compute.storagePools.delete", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35186,27 +35473,26 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to delete.", ++ "storagePool": { ++ "description": "Name of the storage pool to delete.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "response": { + "$ref": "Operation" + }, +@@ -35215,15 +35501,15 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "expandIpCidrRange": { +- "description": "Expands the IP CIDR range of the subnetwork to a specified value.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", +- "httpMethod": "POST", +- "id": "compute.subnetworks.expandIpCidrRange", ++ "get": { ++ "description": "Returns a specified storage pool. 
Gets a list of available storage pools by making a list() request.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "httpMethod": "GET", ++ "id": "compute.storagePools.get", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35233,29 +35519,113 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "storagePool": { ++ "description": "Name of the storage pool to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "response": { ++ "$ref": "StoragePool" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.storagePools.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a storage pool in the specified project using the data in the request.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.insert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to update.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "path": "projects/{project}/zones/{zone}/storagePools", + "request": { +- "$ref": "SubnetworksExpandIpCidrRangeRequest" ++ "$ref": "StoragePool" + }, + "response": { + "$ref": "Operation" +@@ -35265,17 +35635,39 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "get": { +- "description": "Returns the specified subnetwork.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "list": { ++ "description": "Retrieves a list of storage pools contained within the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools", + "httpMethod": "GET", +- "id": "compute.subnetworks.get", ++ "id": "compute.storagePools.list", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone" + ], + "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -35283,48 +35675,85 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools", ++ "response": { ++ "$ref": "StoragePoolList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to return.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "ZoneSetPolicyRequest" ++ }, + "response": { +- "$ref": "Subnetwork" ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "getIamPolicy": { +- "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", +- "httpMethod": "GET", +- "id": "compute.subnetworks.getIamPolicy", ++ "setLabels": { ++ "description": "Sets the labels on a storage pools. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.setLabels", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +- "optionsRequestedPolicyVersion": { +- "description": "Requested IAM Policy version.", +- "format": "int32", +- "location": "query", +- "type": "integer" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -35332,12 +35761,55 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The name of the region for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ "request": { ++ "$ref": "ZoneSetLabelsRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", +@@ -35345,11 +35817,21 @@ + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, + "response": { +- "$ref": "Policy" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -35357,14 +35839,15 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates a subnetwork in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks", +- "httpMethod": "POST", +- "id": "compute.subnetworks.insert", ++ "update": { ++ "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: size_tb and provisioned_iops.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "httpMethod": "PATCH", ++ "id": "compute.storagePools.update", + "parameterOrder": [ + "project", +- "region" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35374,22 +35857,35 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "storagePool": { ++ "description": "The storagePool name for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "updateMask": { ++ "description": "update_mask indicates fields to be updated as part of this request.", ++ "format": "google-fieldmask", + "location": "query", + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks", ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "request": { +- "$ref": "Subnetwork" ++ "$ref": "StoragePool" + }, + "response": { + "$ref": "Operation" +@@ -35398,15 +35894,310 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- }, +- "list": { +- "description": "Retrieves a list of subnetworks available to the specified project.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ } ++ } ++ }, ++ "subnetworks": { ++ "methods": { ++ "aggregatedList": { ++ "description": "Retrieves an aggregated list of subnetworks.", ++ "flatPath": "projects/{project}/aggregated/subnetworks", + "httpMethod": "GET", +- "id": "compute.subnetworks.list", ++ "id": "compute.subnetworks.aggregatedList", + "parameterOrder": [ +- "project", +- "region" ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "includeAllScopes": { ++ "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ "location": "query", ++ "type": "boolean" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/aggregated/subnetworks", ++ "response": { ++ "$ref": "SubnetworkAggregatedList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "delete": { ++ "description": "Deletes the specified subnetwork.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "httpMethod": "DELETE", ++ "id": "compute.subnetworks.delete", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "expandIpCidrRange": { ++ "description": "Expands the IP CIDR range of the subnetwork to a specified value.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "httpMethod": "POST", ++ "id": "compute.subnetworks.expandIpCidrRange", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "request": { ++ "$ref": "SubnetworksExpandIpCidrRangeRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified subnetwork.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "response": { ++ "$ref": "Subnetwork" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a subnetwork in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ "httpMethod": "POST", ++ "id": "compute.subnetworks.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks", ++ "request": { ++ "$ref": "Subnetwork" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves a list of subnetworks available to the specified project.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.list", ++ "parameterOrder": [ ++ "project", ++ "region" + ], + "parameters": { + "filter": { +@@ -37088,6 +37879,55 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetInstances.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "targetInstance" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetInstance": { ++ "description": "Name of the TargetInstance resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "Name of the zone scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", +@@ -37695,6 +38535,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{resource}/testIamPermissions", +@@ -40632,7 +41521,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AWSV4Signature": { +@@ -41079,11 +41968,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -41093,11 +41982,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -41139,8 +42028,7 @@ + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -41778,6 +42666,20 @@ + "" + ], + "type": "string" ++ }, ++ "workloadType": { ++ "description": "The workload type of the instances that will target this reservation.", ++ "enum": [ ++ "BATCH", ++ "SERVING", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Reserved resources will be optimized for BATCH workloads, such as ML training.", ++ "Reserved resources will be optimized for SERVING workloads, such as ML inference.", ++ "" ++ ], ++ "type": "string" + } + }, + "type": "object" +@@ -42114,6 +43016,10 @@ + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. 
For example: pd-standard.", + "type": "string" + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this disk is using confidential compute mode.", ++ "type": "boolean" ++ }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures", + "items": { +@@ -42225,6 +43131,10 @@ + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot." ++ }, ++ "storagePool": { ++ "description": "The storage pool in which the new disk is created. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /storagePools/storagePool - projects/project/zones/zone/storagePools/storagePool - zones/zone/storagePools/storagePool ", ++ "type": "string" + } + }, + "type": "object" +@@ -42872,7 +43782,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -42902,7 +43812,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -43654,6 +44564,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. 
This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -44805,10 +45722,6 @@ + "format": "int64", + "type": "string" + }, +- "instance": { +- "$ref": "Instance", +- "description": "DEPRECATED: Please use instance_properties instead." +- }, + "instanceProperties": { + "$ref": "InstanceProperties", + "description": "The instance properties defining the VM instances to be created. Required if sourceInstanceTemplate is not provided." +@@ -44844,6 +45757,10 @@ + "description": "Per-instance properties to be set on individual instances. To be extended in the future.", + "id": "BulkInsertInstanceResourcePerInstanceProperties", + "properties": { ++ "hostname": { ++ "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", ++ "type": "string" ++ }, + "name": { + "description": "This field is only temporary. It will be removed. Do not use it.", + "type": "string" +@@ -44851,6 +45768,48 @@ + }, + "type": "object" + }, ++ "BulkInsertOperationStatus": { ++ "id": "BulkInsertOperationStatus", ++ "properties": { ++ "createdVmCount": { ++ "description": "[Output Only] Count of VMs successfully created so far.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "deletedVmCount": { ++ "description": "[Output Only] Count of VMs that got deleted during rollback.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "failedToCreateVmCount": { ++ "description": "[Output Only] Count of VMs that started creating but encountered an error.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "status": { ++ "description": "[Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.", ++ "enum": [ ++ "CREATING", ++ "DONE", ++ "ROLLING_BACK", ++ "STATUS_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Rolling forward - creating VMs.", ++ "Done", ++ "Rolling back - cleaning up after an error.", ++ "" ++ ], ++ "type": "string" ++ }, ++ "targetVmCount": { ++ "description": "[Output Only] Count of VMs originally planned to be created.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "BundledLocalSsds": { + "id": "BundledLocalSsds", + "properties": { +@@ -45135,6 +46094,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "CommitmentResourceStatus", ++ "description": "[Output Only] Status information for Commitment resource." ++ }, + "resources": { + "description": "A list of commitment amounts for particular resources. 
Note that VCPU and MEMORY resource commitments must occur together.", + "items": { +@@ -45151,7 +46114,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -45162,12 +46125,16 @@ + "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", + "enum": [ + "ACTIVE", ++ "CANCELED_EARLY_TERMINATION", ++ "CANCELING", + "CANCELLED", + "CREATING", + "EXPIRED", + "NOT_YET_ACTIVE" + ], + "enumDescriptions": [ ++ "", ++ "", + "", + "Deprecate CANCELED status. Will use separate status to differentiate cancel by mergeCud or manual cancellation.", + "", +@@ -45187,6 +46154,7 @@ + "COMPUTE_OPTIMIZED", + "COMPUTE_OPTIMIZED_C2D", + "COMPUTE_OPTIMIZED_C3", ++ "COMPUTE_OPTIMIZED_C3D", + "GENERAL_PURPOSE", + "GENERAL_PURPOSE_E2", + "GENERAL_PURPOSE_N2", +@@ -45210,6 +46178,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -45464,6 +46433,43 @@ + }, + "type": "object" + }, ++ "CommitmentResourceStatus": { ++ "description": "[Output Only] Contains output only fields.", ++ "id": "CommitmentResourceStatus", ++ "properties": { ++ "cancellationInformation": { ++ "$ref": "CommitmentResourceStatusCancellationInformation", ++ "description": "[Output Only] An optional, contains all the needed information of cancellation." ++ } ++ }, ++ "type": "object" ++ }, ++ "CommitmentResourceStatusCancellationInformation": { ++ "id": "CommitmentResourceStatusCancellationInformation", ++ "properties": { ++ "canceledCommitment": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional amount of CUDs canceled so far in the last 365 days." ++ }, ++ "canceledCommitmentLastUpdatedTimestamp": { ++ "description": "[Output Only] An optional last update time of canceled_commitment. RFC3339 text format.", ++ "type": "string" ++ }, ++ "cancellationCap": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional,the cancellation cap for how much commitments can be canceled in a rolling 365 per billing account." ++ }, ++ "cancellationFee": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional, cancellation fee." ++ }, ++ "cancellationFeeExpirationTimestamp": { ++ "description": "[Output Only] An optional, cancellation fee expiration time. RFC3339 text format.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "CommitmentsScopedList": { + "id": "CommitmentsScopedList", + "properties": { +@@ -45655,12 +46661,14 @@ + "enum": [ + "CONFIDENTIAL_INSTANCE_TYPE_UNSPECIFIED", + "SEV", +- "SEV_SNP" ++ "SEV_SNP", ++ "TDX" + ], + "enumDescriptions": [ + "No type specified. Do not use this value.", + "AMD Secure Encrypted Virtualization.", +- "AMD Secure Encrypted Virtualization - Secure Nested Paging." ++ "AMD Secure Encrypted Virtualization - Secure Nested Paging.", ++ "Intel Trust Domain eXtension." + ], + "type": "string" + }, +@@ -45942,6 +46950,10 @@ + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the disk using a customer-supplied encryption key or a customer-managed encryption key. Encryption keys do not protect access to metadata of the disk. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later. 
For example, to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine. After you encrypt a disk with a customer-managed key, the diskEncryptionKey.kmsKeyName is set to a key *version* name once the disk is created. The disk is encrypted with this version of the key. In the response, diskEncryptionKey.kmsKeyName appears in the following format: \"diskEncryptionKey.kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeysVersions/version If you do not provide an encryption key when creating the disk, then the disk is encrypted using an automatically generated key and you don't need to provide a key to use the disk later." + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this disk is using confidential compute mode.", ++ "type": "boolean" ++ }, + "eraseWindowsVssSignature": { + "description": "Specifies whether the disk restored from a source snapshot should erase Windows specific VSS signature.", + "type": "boolean" +@@ -46166,6 +47178,10 @@ + ], + "type": "string" + }, ++ "storagePool": { ++ "description": "The storage pool in which the new disk is created. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /storagePools/storagePool - projects/project/zones/zone/storagePools/storagePool - zones/zone/storagePools/storagePool ", ++ "type": "string" ++ }, + "storageType": { + "description": "[Deprecated] Storage type of the persistent disk.", + "enum": [ +@@ -46333,6 +47349,14 @@ + "DiskAsyncReplication": { + "id": "DiskAsyncReplication", + "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, + "disk": { + "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "type": "string" +@@ -46554,6 +47578,11 @@ + }, + "description": "Key: disk, value: AsyncReplicationStatus message", + "type": "object" ++ }, ++ "usedBytes": { ++ "description": "[Output Only] Space used by data stored in the disk (in bytes). Note that this field is set only when the disk is in a storage pool.", ++ "format": "int64", ++ "type": "string" + } + }, + "type": "object" +@@ -48105,6 +49134,11 @@ + "description": "The name for an association.", + "type": "string" + }, ++ "priority": { ++ "description": "An integer indicating the priority of an association. The priority must be a positive value between 1 and 2147483647. Firewall Policies are evaluated from highest to lowest priority where 1 is the highest priority and 2147483647 is the lowest priority. The default value is `1000`. 
If two associations have the same priority then lexicographical order on association names is applied.", ++ "format": "int32", ++ "type": "integer" ++ }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy of the association.", + "type": "string" +@@ -48605,7 +49639,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -49191,6 +50225,22 @@ + "description": "[Output only] Represents status related to the future reservation.", + "id": "FutureReservationStatus", + "properties": { ++ "amendmentStatus": { ++ "description": "The current status of the requested amendment.", ++ "enum": [ ++ "AMENDMENT_APPROVED", ++ "AMENDMENT_DECLINED", ++ "AMENDMENT_IN_REVIEW", ++ "AMENDMENT_STATUS_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "The requested amendment to the Future Resevation has been approved and applied by GCP.", ++ "The requested amendment to the Future Reservation has been declined by GCP and the original state was restored.", ++ "The requested amendment to the Future Reservation is currently being reviewd by GCP.", ++ "" ++ ], ++ "type": "string" ++ }, + "autoCreatedReservations": { + "description": "Fully qualified urls of the automatically created reservations at start_time.", + "items": { +@@ -49203,6 +50253,10 @@ + "format": "int64", + "type": "string" + }, ++ "lastKnownGoodState": { ++ "$ref": "FutureReservationStatusLastKnownGoodState", ++ "description": "This field represents the future reservation before an amendment was requested. If the amendment is declined, the Future Reservation will be reverted to the last known good state. The last known good state is not set when updating a future reservation whose Procurement Status is DRAFTING." ++ }, + "lockTime": { + "description": "Time when Future Reservation would become LOCKED, after which no modifications to Future Reservation will be allowed. Applicable only after the Future Reservation is in the APPROVED state. The lock_time is an RFC3339 string. The procurement_status will transition to PROCURING state at this time.", + "type": "string" +@@ -49218,6 +50272,7 @@ + "FAILED", + "FAILED_PARTIALLY_FULFILLED", + "FULFILLED", ++ "PENDING_AMENDMENT_APPROVAL", + "PENDING_APPROVAL", + "PROCUREMENT_STATUS_UNSPECIFIED", + "PROCURING", +@@ -49232,6 +50287,7 @@ + "Future reservation failed. No additional reservations were provided.", + "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.", + "Future reservation is fulfilled completely.", ++ "An Amendment to the Future Reservation has been requested. 
If the Amendment is declined, the Future Reservation will be restored to the last known good state.", + "Future reservation is pending approval by GCP.", + "", + "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.", +@@ -49245,6 +50301,77 @@ + }, + "type": "object" + }, ++ "FutureReservationStatusLastKnownGoodState": { ++ "description": "The state that the future reservation will be reverted to should the amendment be declined.", ++ "id": "FutureReservationStatusLastKnownGoodState", ++ "properties": { ++ "description": { ++ "description": "The description of the FutureReservation before an amendment was requested.", ++ "type": "string" ++ }, ++ "futureReservationSpecs": { ++ "$ref": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs" ++ }, ++ "namePrefix": { ++ "description": "The name prefix of the Future Reservation before an amendment was requested.", ++ "type": "string" ++ }, ++ "procurementStatus": { ++ "description": "The status of the last known good state for the Future Reservation.", ++ "enum": [ ++ "APPROVED", ++ "CANCELLED", ++ "COMMITTED", ++ "DECLINED", ++ "DRAFTING", ++ "FAILED", ++ "FAILED_PARTIALLY_FULFILLED", ++ "FULFILLED", ++ "PENDING_AMENDMENT_APPROVAL", ++ "PENDING_APPROVAL", ++ "PROCUREMENT_STATUS_UNSPECIFIED", ++ "PROCURING", ++ "PROVISIONING" ++ ], ++ "enumDescriptions": [ ++ "Future reservation is approved by GCP.", ++ "Future reservation is cancelled by the customer.", ++ "Future reservation is committed by the customer.", ++ "Future reservation is rejected by GCP.", ++ "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.", ++ "Future reservation failed. No additional reservations were provided.", ++ "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.", ++ "Future reservation is fulfilled completely.", ++ "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.", ++ "Future reservation is pending approval by GCP.", ++ "", ++ "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.", ++ "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs": { ++ "description": "The properties of the last known good state for the Future Reservation.", ++ "id": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs", ++ "properties": { ++ "shareSettings": { ++ "$ref": "ShareSettings", ++ "description": "The previous share settings of the Future Reservation." ++ }, ++ "specificSkuProperties": { ++ "$ref": "FutureReservationSpecificSKUProperties", ++ "description": "The previous instance related properties of the Future Reservation." ++ }, ++ "timeWindow": { ++ "$ref": "FutureReservationTimeWindow", ++ "description": "The previous time window of the Future Reservation." 
++ } ++ }, ++ "type": "object" ++ }, + "FutureReservationStatusSpecificSKUProperties": { + "description": "Properties to be set for the Future Reservation.", + "id": "FutureReservationStatusSpecificSKUProperties", +@@ -49867,7 +50994,7 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "BARE_METAL_LINUX_COMPATIBLE", + "FEATURE_TYPE_UNSPECIFIED", +@@ -49877,6 +51004,7 @@ + "SEV_CAPABLE", + "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", ++ "TDX_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", + "WINDOWS" +@@ -49892,6 +51020,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -50101,7 +51230,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/alpha/healthChecks) * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/alpha/healthChecks) * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). 
For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -51058,7 +52187,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -52656,7 +53785,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -53241,6 +54370,10 @@ + "format": "uint64", + "type": "string" + }, ++ "instanceFlexibilityPolicy": { ++ "$ref": "InstanceGroupManagerInstanceFlexibilityPolicy", ++ "description": "Instance flexibility allowing MIG to create VMs from multiple types of machines. Instance flexibility configuration on MIG overrides instance template configuration." ++ }, + "instanceGroup": { + "description": "[Output Only] The URL of the Instance Group resource.", + "type": "string" +@@ -53334,6 +54467,18 @@ + "format": "int32", + "type": "integer" + }, ++ "targetSizeUnit": { ++ "description": "The unit of measure for the target size.", ++ "enum": [ ++ "VCPU", ++ "VM" ++ ], ++ "enumDescriptions": [ ++ "TargetSize is the target count of vCPUs of VMs.", ++ "[Default] TargetSize is the target number of VMs." ++ ], ++ "type": "string" ++ }, + "targetStoppedSize": { + "description": "The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. ", + "format": "int32", +@@ -53592,7 +54737,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. 
The default value is 0.", + "format": "int32", + "type": "integer" + }, +@@ -53621,6 +54766,37 @@ + }, + "type": "object" + }, ++ "InstanceGroupManagerInstanceFlexibilityPolicy": { ++ "id": "InstanceGroupManagerInstanceFlexibilityPolicy", ++ "properties": { ++ "instanceSelectionLists": { ++ "additionalProperties": { ++ "$ref": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection" ++ }, ++ "description": "List of instance selection options that the group will use when creating new VMs.", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection": { ++ "id": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection", ++ "properties": { ++ "machineTypes": { ++ "description": "Full machine-type names, e.g. \"n1-standard-16\".", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "rank": { ++ "description": "Preference of this instance selection. Lower number means higher preference. MIG will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InstanceGroupManagerInstanceLifecyclePolicy": { + "id": "InstanceGroupManagerInstanceLifecyclePolicy", + "properties": { +@@ -54340,7 +55516,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -56058,7 +57234,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -56087,6 +57263,19 @@ + }, + "type": "object" + }, ++ "InstancesBulkInsertOperationMetadata": { ++ "id": "InstancesBulkInsertOperationMetadata", ++ "properties": { ++ "perLocationStatus": { ++ "additionalProperties": { ++ "$ref": "BulkInsertOperationStatus" ++ }, ++ "description": "Status information per location (location name is key). Example key: zones/us-central1-a", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, + "InstancesGetEffectiveFirewallsResponse": { + "id": "InstancesGetEffectiveFirewallsResponse", + "properties": { +@@ -56370,6 +57559,23 @@ + }, + "type": "object" + }, ++ "InstancesSetSecurityPolicyRequest": { ++ "id": "InstancesSetSecurityPolicyRequest", ++ "properties": { ++ "networkInterfaces": { ++ "description": "The network interfaces that the security policy will be applied to. Network interfaces use the nicN naming format. 
You can only set a security policy for network interfaces with an access config.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "securityPolicy": { ++ "description": "A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "InstancesSetServiceAccountRequest": { + "id": "InstancesSetServiceAccountRequest", + "properties": { +@@ -56941,13 +58147,26 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.", + "type": "boolean" + }, ++ "availableFeatures": { ++ "description": "[Output only] List of features available for this interconnect, which can take one of the following values: - MACSEC If present then the interconnect was created on MACsec capable hardware ports. If not present then the interconnect is provisioned on non-MACsec capable ports and MACsec enablement will fail.", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "circuitInfos": { + "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { +@@ -57088,6 +58307,19 @@ + "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", + "type": "string" + }, ++ "requestedFeatures": { ++ "description": "Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -57255,7 +58487,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. 
When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -58156,6 +59388,34 @@ + "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", + "type": "string" + }, ++ "availableFeatures": { ++ "description": "[Output only] List of features available at this interconnect location, which can take one of the following values: - MACSEC ", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "availableLinkTypes": { ++ "description": "[Output only] List of link types available at this interconnect location, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR ", ++ "items": { ++ "enum": [ ++ "LINK_TYPE_ETHERNET_100G_LR", ++ "LINK_TYPE_ETHERNET_10G_LR" ++ ], ++ "enumDescriptions": [ ++ "100G Ethernet, LR Optics.", ++ "10G Ethernet, LR Optics. [(rate_bps) = 10000000000];" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "city": { + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" +@@ -58557,7 +59817,7 @@ + "type": "object" + }, + "InterconnectRemoteLocation": { +- "description": "Represents an Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. 
You can use this resource to find remote location details about an Interconnect attachment (VLAN).", + "id": "InterconnectRemoteLocation", + "properties": { + "address": { +@@ -58628,7 +59888,7 @@ + ], + "enumDescriptions": [ + "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", +- "LACP_UNSUPPORTED: LACP is not supported and will not be enabled on this port. GetDiagnostics will show bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." + ], + "type": "string" + }, +@@ -58688,7 +59948,7 @@ + "id": "InterconnectRemoteLocationConstraints", + "properties": { + "portPairRemoteLocation": { +- "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI will require that both ports use the same remote location.", ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", + "enum": [ + "PORT_PAIR_MATCHING_REMOTE_LOCATION", + "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" +@@ -58750,224 +60010,224 @@ + "kind": { + "default": "compute#interconnectRemoteLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", +- "type": "string" +- }, +- "nextPageToken": { +- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", +- "type": "string" +- }, +- "selfLink": { +- "description": "[Output Only] Server-defined URL for this resource.", +- "type": "string" +- }, +- "warning": { +- "description": "[Output Only] Informational warning message.", +- "properties": { +- "code": { +- "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +- "enum": [ +- "CLEANUP_FAILED", +- "DEPRECATED_RESOURCE_USED", +- "DEPRECATED_TYPE_USED", +- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +- "EXPERIMENTAL_TYPE_USED", +- "EXTERNAL_API_WARNING", +- "FIELD_VALUE_OVERRIDEN", +- "INJECTED_KERNELS_DEPRECATED", +- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +- "LARGE_DEPLOYMENT_WARNING", +- "MISSING_TYPE_DEPENDENCY", +- "NEXT_HOP_ADDRESS_NOT_ASSIGNED", +- "NEXT_HOP_CANNOT_IP_FORWARD", +- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +- "NEXT_HOP_INSTANCE_NOT_FOUND", +- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +- "NEXT_HOP_NOT_RUNNING", +- "NOT_CRITICAL_ERROR", +- "NO_RESULTS_ON_PAGE", +- "PARTIAL_SUCCESS", +- "REQUIRED_TOS_AGREEMENT", +- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +- "RESOURCE_NOT_DELETED", +- "SCHEMA_VALIDATION_IGNORED", +- "SINGLE_INSTANCE_PROPERTY_TEMPLATE", +- "UNDECLARED_PROPERTIES", +- "UNREACHABLE" +- ], +- "enumDescriptions": [ +- "Warning about failed cleanup of transient changes made by a failed operation.", +- "A link to a deprecated resource was created.", +- "When deploying and at least one of the resources has a type marked as deprecated", +- "The user created a boot disk that is larger than image size.", +- "When deploying and at least one of the resources has a type marked as experimental", +- "Warning that is present in an external api call", +- "Warning that value of a field has been overridden. Deprecated unused field.", +- "The operation involved use of an injected kernel, which is deprecated.", +- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", +- "When deploying a deployment with a exceedingly large number of resources", +- "A resource depends on a missing type", +- "The route's nextHopIp address is not assigned to an instance on the network.", +- "The route's next hop instance cannot ip forward.", +- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", +- "The route's nextHopInstance URL refers to an instance that does not exist.", +- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", +- "The route's next hop instance does not have a status of RUNNING.", +- "Error which is not critical. We decided to continue the process despite the mentioned error.", +- "No results are present on a particular list page.", +- "Success is reported, but some results may be missing due to errors", +- "The user attempted to use a resource that requires a TOS they have not accepted.", +- "Warning that a resource is in use.", +- "One or more of the resources set to auto-delete could not be deleted because they were in use.", +- "When a resource schema validation is ignored.", +- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", +- "When undeclared properties in the schema are present", +- "A given scope cannot be reached." +- ], +- "type": "string" +- }, +- "data": { +- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", +- "items": { +- "properties": { +- "key": { +- "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", +- "type": "string" +- }, +- "value": { +- "description": "[Output Only] A warning data value corresponding to the key.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "[Output Only] A human-readable description of the warning code.", +- "type": "string" +- } +- }, +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "InterconnectRemoteLocationPermittedConnections": { +- "id": "InterconnectRemoteLocationPermittedConnections", +- "properties": { +- "interconnectLocation": { +- "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InterconnectsGetDiagnosticsResponse": { +- "description": "Response for the InterconnectsGetDiagnosticsRequest.", +- "id": "InterconnectsGetDiagnosticsResponse", +- "properties": { +- "result": { +- "$ref": "InterconnectDiagnostics" +- } +- }, +- "type": "object" +- }, +- "InterconnectsGetMacsecConfigResponse": { +- "description": "Response for the InterconnectsGetMacsecConfigRequest.", +- "id": "InterconnectsGetMacsecConfigResponse", +- "properties": { +- "etag": { +- "description": "end_interface: MixerGetResponseWithEtagBuilder", +- "type": "string" +- }, +- "result": { +- "$ref": "InterconnectMacsecConfig" +- } +- }, +- "type": "object" +- }, +- "InternalIpAddress": { +- "id": "InternalIpAddress", +- "properties": { +- "cidr": { +- "description": "IP CIDR address or range.", +- "type": "string" +- }, +- "owner": { +- "description": "The owner of the internal IP address.", +- "type": "string" +- }, +- "purpose": { +- "description": "The purpose of the internal IP address if applicable.", +- "type": "string" +- }, +- "region": { +- "description": "The region of the internal IP address if applicable.", +- "type": "string" +- }, +- "type": { +- "description": "The type of the internal IP address.", +- "enum": [ +- "PEER_RESERVED", +- "PEER_USED", +- "REMOTE_RESERVED", +- "REMOTE_USED", +- "RESERVED", +- "SUBNETWORK", +- "TYPE_UNSPECIFIED" +- ], +- "enumDescriptions": [ +- "Reserved IP ranges on peer networks.", +- "Used IP ranges on peer networks, including peer subnetwork IP ranges.", +- "Reserved IP ranges on peer networks of peer networks.", +- "Used IP ranges on peer networks of peer networks.", +- "Reserved IP ranges on local network.", +- "Subnetwork IP ranges on local network.", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InternalIpOwner": { +- "id": "InternalIpOwner", +- "properties": { +- "ipCidrRange": { +- "description": "IP CIDR range being owned.", +- "type": "string" +- }, +- "owners": { +- "description": "URLs of the IP owners of the IP CIDR range.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "systemOwned": { +- "description": "Whether this IP CIDR range is reserved for system use.", +- "type": "boolean" +- } +- }, +- "type": "object" +- }, +- "IpAddressesList": { +- "id": "IpAddressesList", +- "properties": { +- "id": { +- "description": "[Output Only] Unique identifier for the resource; 
defined by the server.", +- "type": "string" +- }, +- "items": { +- "description": "A list of InternalIpAddress resources.", +- "items": { +- "$ref": "InternalIpAddress" +- }, +- "type": "array" +- }, +- "kind": { +- "default": "compute#ipAddressesList", +- "description": "[Output Only] Type of resource. Always compute#ipAddressesList for IP addresses lists.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", ++ "properties": { ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectsGetDiagnosticsResponse": { ++ "description": "Response for the InterconnectsGetDiagnosticsRequest.", ++ "id": "InterconnectsGetDiagnosticsResponse", ++ "properties": { ++ "result": { ++ "$ref": "InterconnectDiagnostics" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectsGetMacsecConfigResponse": { ++ "description": "Response for the InterconnectsGetMacsecConfigRequest.", ++ "id": "InterconnectsGetMacsecConfigResponse", ++ "properties": { ++ "etag": { ++ "description": "end_interface: MixerGetResponseWithEtagBuilder", ++ "type": "string" ++ }, ++ "result": { ++ "$ref": "InterconnectMacsecConfig" ++ } ++ }, ++ "type": "object" ++ }, ++ "InternalIpAddress": { ++ "id": "InternalIpAddress", ++ "properties": { ++ "cidr": { ++ "description": "IP CIDR address or range.", ++ "type": "string" ++ }, ++ "owner": { ++ "description": "The owner of the internal IP address.", ++ "type": "string" ++ }, ++ "purpose": { ++ "description": "The purpose of the internal IP address if applicable.", ++ "type": "string" ++ }, ++ "region": { ++ "description": "The region of the internal IP address if applicable.", ++ "type": "string" ++ }, ++ "type": { ++ "description": "The type of the internal IP address.", ++ "enum": [ ++ "PEER_RESERVED", ++ "PEER_USED", ++ "REMOTE_RESERVED", ++ 
"REMOTE_USED", ++ "RESERVED", ++ "SUBNETWORK", ++ "TYPE_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Reserved IP ranges on peer networks.", ++ "Used IP ranges on peer networks, including peer subnetwork IP ranges.", ++ "Reserved IP ranges on peer networks of peer networks.", ++ "Used IP ranges on peer networks of peer networks.", ++ "Reserved IP ranges on local network.", ++ "Subnetwork IP ranges on local network.", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InternalIpOwner": { ++ "id": "InternalIpOwner", ++ "properties": { ++ "ipCidrRange": { ++ "description": "IP CIDR range being owned.", ++ "type": "string" ++ }, ++ "owners": { ++ "description": "URLs of the IP owners of the IP CIDR range.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "systemOwned": { ++ "description": "Whether this IP CIDR range is reserved for system use.", ++ "type": "boolean" ++ } ++ }, ++ "type": "object" ++ }, ++ "IpAddressesList": { ++ "id": "IpAddressesList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InternalIpAddress resources.", ++ "items": { ++ "$ref": "InternalIpAddress" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#ipAddressesList", ++ "description": "[Output Only] Type of resource. Always compute#ipAddressesList for IP addresses lists.", + "type": "string" + }, + "nextPageToken": { +@@ -59983,7 +61243,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -60501,7 +61761,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -60787,6 +62047,27 @@ + }, + "type": "object" + }, ++ "Money": { ++ "description": "Represents an amount of money with its currency type.", ++ "id": "Money", ++ "properties": { ++ "currencyCode": { ++ "description": "The three-letter currency code defined in ISO 4217.", ++ "type": "string" ++ }, ++ "nanos": { ++ "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "units": { ++ "description": "The whole units of the amount. For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", ++ "format": "int64", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "MutualTls": { + "description": "[Deprecated] Configuration for the mutual Tls mode for peer authentication. 
Configuration for the mutual Tls mode for peer authentication.", + "id": "MutualTls", +@@ -60824,6 +62105,72 @@ + }, + "type": "object" + }, ++ "NatIpInfo": { ++ "description": "Contains NAT IP information of a NAT config (i.e. usage status, mode).", ++ "id": "NatIpInfo", ++ "properties": { ++ "natIpInfoMappings": { ++ "description": "A list of all NAT IPs assigned to this NAT config.", ++ "items": { ++ "$ref": "NatIpInfoNatIpInfoMapping" ++ }, ++ "type": "array" ++ }, ++ "natName": { ++ "description": "Name of the NAT config which the NAT IP belongs to.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "NatIpInfoNatIpInfoMapping": { ++ "description": "Contains information of a NAT IP.", ++ "id": "NatIpInfoNatIpInfoMapping", ++ "properties": { ++ "mode": { ++ "description": "Specifies whether NAT IP is auto or manual.", ++ "enum": [ ++ "AUTO", ++ "MANUAL" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "natIp": { ++ "description": "NAT IP address. For example: 203.0.113.11.", ++ "type": "string" ++ }, ++ "usage": { ++ "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", ++ "enum": [ ++ "IN_USE", ++ "UNUSED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "NatIpInfoResponse": { ++ "id": "NatIpInfoResponse", ++ "properties": { ++ "result": { ++ "description": "[Output Only] A list of NAT IP information.", ++ "items": { ++ "$ref": "NatIpInfo" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "Network": { + "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Network", +@@ -60854,7 +62201,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -60965,7 +62312,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -60990,7 +62337,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -61154,7 +62501,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. 
This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -61162,7 +62509,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -62636,7 +63983,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -65263,6 +66610,9 @@ + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", + "type": "string" + }, ++ "instancesBulkInsertOperationMetadata": { ++ "$ref": "InstancesBulkInsertOperationMetadata" ++ }, + "kind": { + "default": "compute#operation", + "description": "[Output Only] Type of the resource. Always `compute#operation` for Operation resources.", +@@ -68475,11 +69825,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -68494,6 +69848,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -68521,6 +69876,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -68678,6 +70034,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -71672,9 +73034,6 @@ + }, + "description": "[Output Only] Represents the status of the service integration specs defined by the user in instance.serviceIntegrationSpecs.", + "type": "object" +- }, +- "upcomingMaintenance": { +- "$ref": "ResourceStatusUpcomingMaintenance" + } + }, + "type": "object" +@@ -71733,16 +73092,6 @@ + }, + "type": "object" + }, +- "ResourceStatusUpcomingMaintenance": { +- "id": "ResourceStatusUpcomingMaintenance", +- "properties": { +- "canReschedule": { +- "description": "Indicates if the maintenance can be customer triggered. See go/sf-ctm-design for more details", +- "type": "boolean" +- } +- }, +- "type": "object" +- }, + "RolloutPolicy": { + "description": "A rollout policy configuration.", + "id": "RolloutPolicy", +@@ -72514,14 +73863,14 @@ + "description": "BFD configuration for the BGP peering." 
+ }, + "customLearnedIpRanges": { +- "description": "User-defined Custom Learned Route IP range list for a BGP session.", ++ "description": "A list of user-defined custom learned route IP address ranges for a BGP session.", + "items": { + "$ref": "RouterBgpPeerCustomLearnedIpRange" + }, + "type": "array" + }, + "customLearnedRoutePriority": { +- "description": "User-defined Custom Learned Route Priority for a BGP session. This will be applied to all Custom Learned Route ranges of the BGP session, if not given, google-managed priority of 100 is used.", ++ "description": "The user-defined custom learned route priority for a BGP session. This value is applied to all custom learned route ranges for the session. You can choose a value from `0` to `65335`. If you don't provide a value, Google Cloud assigns a priority of `100` to the ranges.", + "format": "int32", + "type": "integer" + }, +@@ -72674,7 +74023,7 @@ + "id": "RouterBgpPeerCustomLearnedIpRange", + "properties": { + "range": { +- "description": "The Custom Learned Route IP range. Must be a valid CIDR-formatted prefix. If an IP is provided without a subnet mask, it is interpreted as a /32 singular IP range for IPv4, and /128 for IPv6.", ++ "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, a `/32` singular IP address range, and, for IPv6, `/128`.", + "type": "string" + } + }, +@@ -72982,7 +74331,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -74415,15 +75764,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -74447,9 +75796,11 @@ + "jsonParsing": { + "enum": [ + "DISABLED", +- "STANDARD" ++ "STANDARD", ++ "STANDARD_WITH_GRAPHQL" + ], + "enumDescriptions": [ ++ "", + "", + "" + ], +@@ -74465,6 +75816,13 @@ + "" + ], + "type": "string" ++ }, ++ "userIpRequestHeaders": { ++ "description": "An optional list of case-insensitive request header names to use for resolving the callers client IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -74663,7 +76021,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -74683,7 +76041,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. 
This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -74708,7 +76066,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -74742,7 +76100,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "redirectTarget": { + "description": "This must be specified for redirect actions. Cannot be specified for any other actions.", +@@ -74824,7 +76182,11 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." ++ }, ++ "exprOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptions", ++ "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -74909,6 +76271,36 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleMatcherExprOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptions", ++ "properties": { ++ "recaptchaOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "properties": { ++ "actionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "sessionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRuleNetworkMatcher": { + "description": "Represents a match condition that incoming network traffic is evaluated against.", + "id": "SecurityPolicyRuleNetworkMatcher", +@@ -75130,7 +76522,7 @@ + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedActionRpcStatus": { +@@ -75139,7 +76531,7 @@ + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -75299,7 +76691,7 @@ + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "clientTlsSettings": { +@@ -75307,7 +76699,7 @@ + "description": "[Deprecated] TLS Settings for the backend service." + }, + "subjectAltNames": { +- "description": "Optional. 
A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -75416,7 +76808,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -75513,6 +76905,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. 
You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -75954,7 +77350,7 @@ + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadata": { +- "description": "Encapsulates partial completion metadata for SetCommonInstanceMetadata. Will be propagated on Operation.metadata as per go/partial-completion-api-clean. See go/gce-aips/2822 for API council results.", ++ "description": "Encapsulates partial completion metadata for SetCommonInstanceMetadata. Will be propagated on Operation.metadata.", + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { +@@ -76268,6 +77664,10 @@ + "format": "int64", + "type": "string" + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this snapshot is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", ++ "type": "boolean" ++ }, + "guestFlush": { + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process.", + "type": "boolean" +@@ -76328,6 +77728,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -76371,6 +77772,10 @@ + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, ++ "sourceDiskForRecoveryCheckpoint": { ++ "description": "The source disk whose recovery checkpoint will be used to create this snapshot.", ++ "type": "string" ++ }, + "sourceDiskId": { + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", + "type": "string" +@@ -77562,303 +78967,807 @@ + }, + "type": "object" + }, +- "SslPolicy": { +- "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", +- "id": "SslPolicy", +- "properties": { +- "creationTimestamp": { +- "description": "[Output Only] Creation timestamp in RFC3339 text format.", +- "type": "string" +- }, +- "customFeatures": { +- "description": "A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "description": { +- "description": "An optional description of this resource. Provide this property when you create the resource.", +- "type": "string" +- }, +- "enabledFeatures": { +- "description": "[Output Only] The list of features enabled in the SSL policy.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "fingerprint": { +- "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve an SslPolicy.", +- "format": "byte", +- "type": "string" +- }, +- "id": { +- "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", +- "format": "uint64", +- "type": "string" +- }, +- "kind": { +- "default": "compute#sslPolicy", +- "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", +- "type": "string" +- }, +- "minTlsVersion": { +- "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", +- "enum": [ +- "TLS_1_0", +- "TLS_1_1", +- "TLS_1_2" +- ], +- "enumDescriptions": [ +- "TLS 1.0", +- "TLS 1.1", +- "TLS 1.2" +- ], +- "type": "string" +- }, +- "name": { +- "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "type": "string" +- }, +- "profile": { +- "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", +- "enum": [ +- "COMPATIBLE", +- "CUSTOM", +- "MODERN", +- "RESTRICTED" +- ], +- "enumDescriptions": [ +- "Compatible profile. Allows the broadset set of clients, even those which support only out-of-date SSL features to negotiate with the load balancer.", +- "Custom profile. Allow only the set of allowed SSL features specified in the customFeatures field.", +- "Modern profile. Supports a wide set of SSL features, allowing modern clients to negotiate SSL with the load balancer.", +- "Restricted profile. Supports a reduced set of SSL features, intended to meet stricter compliance requirements." +- ], +- "type": "string" +- }, +- "region": { +- "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", +- "type": "string" +- }, +- "selfLink": { +- "description": "[Output Only] Server-defined URL for the resource.", +- "type": "string" +- }, +- "selfLinkWithId": { +- "description": "[Output Only] Server-defined URL for this resource with the resource id.", +- "type": "string" +- }, +- "tlsSettings": { +- "$ref": "ServerTlsSettings", +- "description": "Security settings for the proxy. This field is only applicable to a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED." +- }, +- "warnings": { +- "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", +- "items": { +- "properties": { +- "code": { +- "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +- "enum": [ +- "CLEANUP_FAILED", +- "DEPRECATED_RESOURCE_USED", +- "DEPRECATED_TYPE_USED", +- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +- "EXPERIMENTAL_TYPE_USED", +- "EXTERNAL_API_WARNING", +- "FIELD_VALUE_OVERRIDEN", +- "INJECTED_KERNELS_DEPRECATED", +- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +- "LARGE_DEPLOYMENT_WARNING", +- "MISSING_TYPE_DEPENDENCY", +- "NEXT_HOP_ADDRESS_NOT_ASSIGNED", +- "NEXT_HOP_CANNOT_IP_FORWARD", +- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +- "NEXT_HOP_INSTANCE_NOT_FOUND", +- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +- "NEXT_HOP_NOT_RUNNING", +- "NOT_CRITICAL_ERROR", +- "NO_RESULTS_ON_PAGE", +- "PARTIAL_SUCCESS", +- "REQUIRED_TOS_AGREEMENT", +- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +- "RESOURCE_NOT_DELETED", +- "SCHEMA_VALIDATION_IGNORED", +- "SINGLE_INSTANCE_PROPERTY_TEMPLATE", +- "UNDECLARED_PROPERTIES", +- "UNREACHABLE" +- ], +- "enumDescriptions": [ +- "Warning about failed cleanup of transient changes made by a failed operation.", +- "A link to a deprecated resource was created.", +- "When deploying and at least one of the resources has a type marked as deprecated", +- "The user created a boot disk that is larger than image size.", +- "When deploying and at least one of the resources has a type marked as experimental", +- "Warning that is present in an external api call", +- "Warning that value of a field has been overridden. Deprecated unused field.", +- "The operation involved use of an injected kernel, which is deprecated.", +- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", +- "When deploying a deployment with a exceedingly large number of resources", +- "A resource depends on a missing type", +- "The route's nextHopIp address is not assigned to an instance on the network.", +- "The route's next hop instance cannot ip forward.", +- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", +- "The route's nextHopInstance URL refers to an instance that does not exist.", +- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", +- "The route's next hop instance does not have a status of RUNNING.", +- "Error which is not critical. We decided to continue the process despite the mentioned error.", +- "No results are present on a particular list page.", +- "Success is reported, but some results may be missing due to errors", +- "The user attempted to use a resource that requires a TOS they have not accepted.", +- "Warning that a resource is in use.", +- "One or more of the resources set to auto-delete could not be deleted because they were in use.", +- "When a resource schema validation is ignored.", +- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", +- "When undeclared properties in the schema are present", +- "A given scope cannot be reached." +- ], +- "type": "string" +- }, +- "data": { +- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", +- "items": { +- "properties": { +- "key": { +- "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", +- "type": "string" +- }, +- "value": { +- "description": "[Output Only] A warning data value corresponding to the key.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "[Output Only] A human-readable description of the warning code.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "SslPolicyReference": { +- "id": "SslPolicyReference", +- "properties": { +- "sslPolicy": { +- "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicy": { +- "id": "StatefulPolicy", +- "properties": { +- "preservedState": { +- "$ref": "StatefulPolicyPreservedState" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedState": { +- "description": "Configuration of preserved resources.", +- "id": "StatefulPolicyPreservedState", +- "properties": { +- "disks": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateDiskDevice" +- }, +- "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", +- "type": "object" +- }, +- "externalIPs": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateNetworkIp" +- }, +- "description": "External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", +- "type": "object" +- }, +- "internalIPs": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateNetworkIp" +- }, +- "description": "Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedStateDiskDevice": { +- "id": "StatefulPolicyPreservedStateDiskDevice", +- "properties": { +- "autoDelete": { +- "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", +- "enum": [ +- "NEVER", +- "ON_PERMANENT_INSTANCE_DELETION" +- ], +- "enumDescriptions": [ +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedStateNetworkIp": { +- "id": "StatefulPolicyPreservedStateNetworkIp", +- "properties": { +- "autoDelete": { +- "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. 
when the given instance or the whole group is deleted.", +- "enum": [ +- "NEVER", +- "ON_PERMANENT_INSTANCE_DELETION" +- ], +- "enumDescriptions": [ +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "Status": { +- "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", +- "id": "Status", +- "properties": { +- "code": { +- "description": "The status code, which should be an enum value of google.rpc.Code.", +- "format": "int32", +- "type": "integer" +- }, +- "details": { +- "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", +- "items": { +- "additionalProperties": { +- "description": "Properties of the object. Contains field @type with type URL.", +- "type": "any" +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", +- "type": "string" +- } +- }, +- "type": "object" +- }, ++ "SslPolicy": { ++ "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", ++ "id": "SslPolicy", ++ "properties": { ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "customFeatures": { ++ "description": "A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "enabledFeatures": { ++ "description": "[Output Only] The list of features enabled in the SSL policy.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "fingerprint": { ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an SslPolicy.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#sslPolicy", ++ "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", ++ "type": "string" ++ }, ++ "minTlsVersion": { ++ "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. 
This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", ++ "enum": [ ++ "TLS_1_0", ++ "TLS_1_1", ++ "TLS_1_2" ++ ], ++ "enumDescriptions": [ ++ "TLS 1.0", ++ "TLS 1.1", ++ "TLS 1.2" ++ ], ++ "type": "string" ++ }, ++ "name": { ++ "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "profile": { ++ "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", ++ "enum": [ ++ "COMPATIBLE", ++ "CUSTOM", ++ "MODERN", ++ "RESTRICTED" ++ ], ++ "enumDescriptions": [ ++ "Compatible profile. Allows the broadset set of clients, even those which support only out-of-date SSL features to negotiate with the load balancer.", ++ "Custom profile. Allow only the set of allowed SSL features specified in the customFeatures field.", ++ "Modern profile. Supports a wide set of SSL features, allowing modern clients to negotiate SSL with the load balancer.", ++ "Restricted profile. Supports a reduced set of SSL features, intended to meet stricter compliance requirements." ++ ], ++ "type": "string" ++ }, ++ "region": { ++ "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource with the resource id.", ++ "type": "string" ++ }, ++ "tlsSettings": { ++ "$ref": "ServerTlsSettings", ++ "description": "Security settings for the proxy. This field is only applicable to a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED." ++ }, ++ "warnings": { ++ "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", ++ "items": { ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SslPolicyReference": { ++ "id": "SslPolicyReference", ++ "properties": { ++ "sslPolicy": { ++ "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicy": { ++ "id": "StatefulPolicy", ++ "properties": { ++ "preservedState": { ++ "$ref": "StatefulPolicyPreservedState" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedState": { ++ "description": "Configuration of preserved resources.", ++ "id": "StatefulPolicyPreservedState", ++ "properties": { ++ "disks": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateDiskDevice" ++ }, ++ "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", ++ "type": "object" ++ }, ++ "externalIPs": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateNetworkIp" ++ }, ++ "description": "External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", ++ "type": "object" ++ }, ++ "internalIPs": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateNetworkIp" ++ }, ++ "description": "Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedStateDiskDevice": { ++ "id": "StatefulPolicyPreservedStateDiskDevice", ++ "properties": { ++ "autoDelete": { ++ "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", ++ "enum": [ ++ "NEVER", ++ "ON_PERMANENT_INSTANCE_DELETION" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedStateNetworkIp": { ++ "id": "StatefulPolicyPreservedStateNetworkIp", ++ "properties": { ++ "autoDelete": { ++ "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. 
when the given instance or the whole group is deleted.", ++ "enum": [ ++ "NEVER", ++ "ON_PERMANENT_INSTANCE_DELETION" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "Status": { ++ "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", ++ "id": "Status", ++ "properties": { ++ "code": { ++ "description": "The status code, which should be an enum value of google.rpc.Code.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "details": { ++ "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", ++ "items": { ++ "additionalProperties": { ++ "description": "Properties of the object. Contains field @type with type URL.", ++ "type": "any" ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePool": { ++ "description": "Represents a zonal storage pool resource.", ++ "id": "StoragePool", ++ "properties": { ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#storagePool", ++ "description": "[Output Only] Type of the resource. Always compute#storagePool for storage pools.", ++ "type": "string" ++ }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this storage pool, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a storage pool.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels to apply to this storage pool. These can be later modified by the setLabels method.", ++ "type": "object" ++ }, ++ "name": { ++ "annotations": { ++ "required": [ ++ "compute.storagePools.insert" ++ ] ++ }, ++ "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "provisionedIops": { ++ "description": "Provsioned IOPS of the storage pool.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "resourceStatus": { ++ "$ref": "StoragePoolResourceStatus", ++ "description": "[Output Only] Status information for the storage pool resource." ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource's resource id.", ++ "type": "string" ++ }, ++ "sizeGb": { ++ "description": "Size, in GiB, of the storage pool.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "state": { ++ "description": "[Output Only] The status of storage pool creation. - CREATING: Storage pool is provisioning. storagePool. - FAILED: Storage pool creation failed. - READY: Storage pool is ready for use. - DELETING: Storage pool is deleting. ", ++ "enum": [ ++ "CREATING", ++ "DELETING", ++ "FAILED", ++ "READY" ++ ], ++ "enumDescriptions": [ ++ "StoragePool is provisioning", ++ "StoragePool is deleting.", ++ "StoragePool creation failed.", ++ "StoragePool is ready for use." ++ ], ++ "type": "string" ++ }, ++ "type": { ++ "description": "Type of the storage pool", ++ "enum": [ ++ "SSD", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "zone": { ++ "description": "[Output Only] URL of the zone where the storage pool resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolAggregatedList": { ++ "id": "StoragePoolAggregatedList", ++ "properties": { ++ "etag": { ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "additionalProperties": { ++ "$ref": "StoragePoolsScopedList", ++ "description": "[Output Only] Name of the scope containing this set of storage pool." ++ }, ++ "description": "A list of StoragePoolsScopedList resources.", ++ "type": "object" ++ }, ++ "kind": { ++ "default": "compute#storagePoolAggregatedList", ++ "description": "[Output Only] Type of resource. Always compute#storagePoolAggregatedList for aggregated lists of storage pools.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolList": { ++ "description": "A list of StoragePool resources.", ++ "id": "StoragePoolList", ++ "properties": { ++ "etag": { ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of StoragePool resources.", ++ "items": { ++ "$ref": "StoragePool" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#storagePoolList", ++ "description": "[Output Only] Type of resource. Always compute#storagePoolList for lists of storagePools.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources. end_interface: MixerListResponseWithEtagBuilder", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolResourceStatus": { ++ "description": "[Output Only] Contains output only fields.", ++ "id": "StoragePoolResourceStatus", ++ "properties": { ++ "aggregateDiskProvisionedIops": { ++ "description": "[Output Only] Sum of all the disk' provisioned IOPS.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "aggregateDiskSizeGb": { ++ "description": "[Output Only] Sum of all the capacity provisioned in disks in this storage pool. A disk's provisioned capacity is the same as its total capacity.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "lastResizeTimestamp": { ++ "description": "[Output Only] Timestamp of the last successful resize in RFC3339 text format.", ++ "type": "string" ++ }, ++ "maxAggregateDiskSizeGb": { ++ "description": "[Output Only] Maximum allowed aggregate disk size in gigabytes.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "numberOfDisks": { ++ "description": "[Output Only] Number of disks used.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "usedBytes": { ++ "description": "[Output Only] Space used by data stored in disks within the storage pool (in bytes).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "usedReducedBytes": { ++ "description": "[Output Only] Space used by compressed and deduped data stored in disks within the storage pool (in bytes).", ++ "format": "int64", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolsScopedList": { ++ "id": "StoragePoolsScopedList", ++ "properties": { ++ "storagePools": { ++ "description": "[Output Only] A list of storage pool contained in this scope.", ++ "items": { ++ "$ref": "StoragePool" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning which replaces the list of storage pool when the list is empty.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, + "Subnetwork": { + "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Subnetwork", +@@ -77896,7 +79805,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "enableL2": { +@@ -78003,7 +79912,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "AGGREGATE", + "CLOUD_EXTENSION", +@@ -78037,7 +79946,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. 
Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -78370,7 +80279,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -78939,6 +80848,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -79330,7 +81244,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -79386,7 +81300,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. 
Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -79409,6 +81323,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -79455,7 +81374,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -80670,7 +82589,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -80712,7 +82631,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -82663,7 +84582,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. 
The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "AGGREGATE", + "CLOUD_EXTENSION", +@@ -82689,7 +84608,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -83171,11 +85090,13 @@ + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", + "enum": [ + "IPV4_IPV6", +- "IPV4_ONLY" ++ "IPV4_ONLY", ++ "IPV6_ONLY" + ], + "enumDescriptions": [ + "Enable VPN gateway with both IPv4 and IPv6 protocols.", +- "Enable VPN gateway with only IPv4 protocol." ++ "Enable VPN gateway with only IPv4 protocol.", ++ "Enable VPN gateway with only IPv6 protocol." + ], + "type": "string" + }, +@@ -83488,7 +85409,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -83500,7 +85421,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. 
The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +index 0b34f12ea5d..a0afc85bb79 100644 +--- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:alpha" + const apiName = "compute" +@@ -228,6 +229,7 @@ func New(client *http.Client) (*Service, error) { + s.Snapshots = NewSnapshotsService(s) + s.SslCertificates = NewSslCertificatesService(s) + s.SslPolicies = NewSslPoliciesService(s) ++ s.StoragePools = NewStoragePoolsService(s) + s.Subnetworks = NewSubnetworksService(s) + s.TargetGrpcProxies = NewTargetGrpcProxiesService(s) + s.TargetHttpProxies = NewTargetHttpProxiesService(s) +@@ -419,6 +421,8 @@ type Service struct { + + SslPolicies *SslPoliciesService + ++ StoragePools *StoragePoolsService ++ + Subnetworks *SubnetworksService + + TargetGrpcProxies *TargetGrpcProxiesService +@@ -1213,6 +1217,15 @@ type SslPoliciesService struct { + s *Service + } + ++func NewStoragePoolsService(s *Service) *StoragePoolsService { ++ rs := &StoragePoolsService{s: s} ++ return rs ++} ++ ++type StoragePoolsService struct { ++ s *Service ++} ++ + func NewSubnetworksService(s *Service) *SubnetworksService { + rs := &SubnetworksService{s: s} + return rs +@@ -2066,32 +2079,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. ++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. 
++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -2141,12 +2157,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -3060,6 +3077,17 @@ type AllocationAggregateReservation struct { + // "VM_FAMILY_MEMORY_OPTIMIZED_M3" + VmFamily string `json:"vmFamily,omitempty"` + ++ // WorkloadType: The workload type of the instances that will target ++ // this reservation. ++ // ++ // Possible values: ++ // "BATCH" - Reserved resources will be optimized for BATCH workloads, ++ // such as ML training. ++ // "SERVING" - Reserved resources will be optimized for SERVING ++ // workloads, such as ML inference. ++ // "UNSPECIFIED" ++ WorkloadType string `json:"workloadType,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "InUseResources") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -3594,6 +3622,10 @@ type AttachedDiskInitializeParams struct { + // example: pd-standard. + DiskType string `json:"diskType,omitempty"` + ++ // EnableConfidentialCompute: Whether this disk is using confidential ++ // compute mode. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. Guest +@@ -3717,6 +3749,15 @@ type AttachedDiskInitializeParams struct { + // the source snapshot. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + ++ // StoragePool: The storage pool in which the new disk is created. You ++ // can provide this as a partial or full URL to the resource. 
For ++ // example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /storagePools/storagePool - ++ // projects/project/zones/zone/storagePools/storagePool - ++ // zones/zone/storagePools/storagePool ++ StoragePool string `json:"storagePool,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -4765,15 +4806,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). ++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4801,7 +4844,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -6184,6 +6232,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and +@@ -8076,9 +8128,6 @@ type BulkInsertInstanceResource struct { + // Count: The maximum number of instances to create. + Count int64 `json:"count,omitempty,string"` + +- // Instance: DEPRECATED: Please use instance_properties instead. +- Instance *Instance `json:"instance,omitempty"` +- + // InstanceProperties: The instance properties defining the VM instances + // to be created. Required if sourceInstanceTemplate is not provided. + InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` +@@ -8151,11 +8200,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { + // properties to be set on individual instances. To be extended in the + // future. + type BulkInsertInstanceResourcePerInstanceProperties struct { ++ // Hostname: Specifies the hostname of the instance. More details in: ++ // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention ++ Hostname string `json:"hostname,omitempty"` ++ + // Name: This field is only temporary. It will be removed. Do not use + // it. + Name string `json:"name,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Name") to ++ // ForceSendFields is a list of field names (e.g. "Hostname") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -8163,8 +8216,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Name") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Hostname") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -8178,6 +8231,57 @@ func (s *BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type BulkInsertOperationStatus struct { ++ // CreatedVmCount: [Output Only] Count of VMs successfully created so ++ // far. ++ CreatedVmCount int64 `json:"createdVmCount,omitempty"` ++ ++ // DeletedVmCount: [Output Only] Count of VMs that got deleted during ++ // rollback. ++ DeletedVmCount int64 `json:"deletedVmCount,omitempty"` ++ ++ // FailedToCreateVmCount: [Output Only] Count of VMs that started ++ // creating but encountered an error. ++ FailedToCreateVmCount int64 `json:"failedToCreateVmCount,omitempty"` ++ ++ // Status: [Output Only] Creation status of BulkInsert operation - ++ // information if the flow is rolling forward or rolling back. ++ // ++ // Possible values: ++ // "CREATING" - Rolling forward - creating VMs. ++ // "DONE" - Done ++ // "ROLLING_BACK" - Rolling back - cleaning up after an error. ++ // "STATUS_UNSPECIFIED" ++ Status string `json:"status,omitempty"` ++ ++ // TargetVmCount: [Output Only] Count of VMs originally planned to be ++ // created. ++ TargetVmCount int64 `json:"targetVmCount,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "CreatedVmCount") to ++ // unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CreatedVmCount") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BulkInsertOperationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod BulkInsertOperationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type BundledLocalSsds struct { + // DefaultInterface: The default disk interface if the interface is not + // specified. +@@ -8588,6 +8692,10 @@ type Commitment struct { + // Reservations: List of reservations in this commitment. + Reservations []*Reservation `json:"reservations,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for Commitment ++ // resource. ++ ResourceStatus *CommitmentResourceStatus `json:"resourceStatus,omitempty"` ++ + // Resources: A list of commitment amounts for particular resources. + // Note that VCPU and MEMORY resource commitments must occur together. + Resources []*ResourceCommitment `json:"resources,omitempty"` +@@ -8599,7 +8707,7 @@ type Commitment struct { + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -8613,6 +8721,8 @@ type Commitment struct { + // + // Possible values: + // "ACTIVE" ++ // "CANCELED_EARLY_TERMINATION" ++ // "CANCELING" + // "CANCELLED" - Deprecate CANCELED status. Will use separate status + // to differentiate cancel by mergeCud or manual cancellation. + // "CREATING" +@@ -8635,6 +8745,7 @@ type Commitment struct { + // "COMPUTE_OPTIMIZED" + // "COMPUTE_OPTIMIZED_C2D" + // "COMPUTE_OPTIMIZED_C3" ++ // "COMPUTE_OPTIMIZED_C3D" + // "GENERAL_PURPOSE" + // "GENERAL_PURPOSE_E2" + // "GENERAL_PURPOSE_N2" +@@ -9054,6 +9165,82 @@ func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// CommitmentResourceStatus: [Output Only] Contains output only fields. ++type CommitmentResourceStatus struct { ++ // CancellationInformation: [Output Only] An optional, contains all the ++ // needed information of cancellation. ++ CancellationInformation *CommitmentResourceStatusCancellationInformation `json:"cancellationInformation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "CancellationInformation") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. 
This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CancellationInformation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CommitmentResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod CommitmentResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type CommitmentResourceStatusCancellationInformation struct { ++ // CanceledCommitment: [Output Only] An optional amount of CUDs canceled ++ // so far in the last 365 days. ++ CanceledCommitment *Money `json:"canceledCommitment,omitempty"` ++ ++ // CanceledCommitmentLastUpdatedTimestamp: [Output Only] An optional ++ // last update time of canceled_commitment. RFC3339 text format. ++ CanceledCommitmentLastUpdatedTimestamp string `json:"canceledCommitmentLastUpdatedTimestamp,omitempty"` ++ ++ // CancellationCap: [Output Only] An optional,the cancellation cap for ++ // how much commitments can be canceled in a rolling 365 per billing ++ // account. ++ CancellationCap *Money `json:"cancellationCap,omitempty"` ++ ++ // CancellationFee: [Output Only] An optional, cancellation fee. ++ CancellationFee *Money `json:"cancellationFee,omitempty"` ++ ++ // CancellationFeeExpirationTimestamp: [Output Only] An optional, ++ // cancellation fee expiration time. RFC3339 text format. ++ CancellationFeeExpirationTimestamp string `json:"cancellationFeeExpirationTimestamp,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "CanceledCommitment") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CanceledCommitment") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CommitmentResourceStatusCancellationInformation) MarshalJSON() ([]byte, error) { ++ type NoMethod CommitmentResourceStatusCancellationInformation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type CommitmentsScopedList struct { + // Commitments: [Output Only] A list of commitments contained in this + // scope. +@@ -9302,6 +9489,7 @@ type ConfidentialInstanceConfig struct { + // "SEV" - AMD Secure Encrypted Virtualization. + // "SEV_SNP" - AMD Secure Encrypted Virtualization - Secure Nested + // Paging. ++ // "TDX" - Intel Trust Domain eXtension. 
+ ConfidentialInstanceType string `json:"confidentialInstanceType,omitempty"` + + // EnableConfidentialCompute: Defines whether the instance should have +@@ -9851,6 +10039,10 @@ type Disk struct { + // provide a key to use the disk later. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + ++ // EnableConfidentialCompute: Whether this disk is using confidential ++ // compute mode. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // EraseWindowsVssSignature: Specifies whether the disk restored from a + // source snapshot should erase Windows specific VSS signature. + EraseWindowsVssSignature bool `json:"eraseWindowsVssSignature,omitempty"` +@@ -10120,6 +10312,15 @@ type Disk struct { + // "RESTORING" - Source data is being copied into the disk. + Status string `json:"status,omitempty"` + ++ // StoragePool: The storage pool in which the new disk is created. You ++ // can provide this as a partial or full URL to the resource. For ++ // example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /storagePools/storagePool - ++ // projects/project/zones/zone/storagePools/storagePool - ++ // zones/zone/storagePools/storagePool ++ StoragePool string `json:"storagePool,omitempty"` ++ + // StorageType: [Deprecated] Storage type of the persistent disk. + // + // Possible values: +@@ -10369,6 +10570,16 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + } + + type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ + // Disk: The other disk asynchronously replicated to or from the current + // disk. You can provide this as a partial or full URL to the resource. + // For example, the following are valid values: - +@@ -10385,20 +10596,22 @@ type DiskAsyncReplication struct { + // identify the exact version of the disk that was used. + DiskId string `json:"diskId,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Disk") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Disk") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. 
However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. + NullFields []string `json:"-"` + } + +@@ -10783,6 +10996,11 @@ type DiskResourceStatus struct { + // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message + AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` + ++ // UsedBytes: [Output Only] Space used by data stored in the disk (in ++ // bytes). Note that this field is set only when the disk is in a ++ // storage pool. ++ UsedBytes int64 `json:"usedBytes,omitempty,string"` ++ + // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -13399,6 +13617,14 @@ type FirewallPolicyAssociation struct { + // Name: The name for an association. + Name string `json:"name,omitempty"` + ++ // Priority: An integer indicating the priority of an association. The ++ // priority must be a positive value between 1 and 2147483647. Firewall ++ // Policies are evaluated from highest to lowest priority where 1 is the ++ // highest priority and 2147483647 is the lowest priority. The default ++ // value is `1000`. If two associations have the same priority then ++ // lexicographical order on association names is applied. ++ Priority int64 `json:"priority,omitempty"` ++ + // ShortName: [Output Only] The short name of the firewall policy of the + // association. + ShortName string `json:"shortName,omitempty"` +@@ -14125,9 +14351,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -15051,6 +15278,19 @@ func (s *FutureReservationSpecificSKUProperties) MarshalJSON() ([]byte, error) { + // FutureReservationStatus: [Output only] Represents status related to + // the future reservation. + type FutureReservationStatus struct { ++ // AmendmentStatus: The current status of the requested amendment. ++ // ++ // Possible values: ++ // "AMENDMENT_APPROVED" - The requested amendment to the Future ++ // Resevation has been approved and applied by GCP. 
++ // "AMENDMENT_DECLINED" - The requested amendment to the Future ++ // Reservation has been declined by GCP and the original state was ++ // restored. ++ // "AMENDMENT_IN_REVIEW" - The requested amendment to the Future ++ // Reservation is currently being reviewd by GCP. ++ // "AMENDMENT_STATUS_UNSPECIFIED" ++ AmendmentStatus string `json:"amendmentStatus,omitempty"` ++ + // AutoCreatedReservations: Fully qualified urls of the automatically + // created reservations at start_time. + AutoCreatedReservations []string `json:"autoCreatedReservations,omitempty"` +@@ -15060,6 +15300,13 @@ type FutureReservationStatus struct { + // capacity delivered as part of existing matching reservations. + FulfilledCount int64 `json:"fulfilledCount,omitempty,string"` + ++ // LastKnownGoodState: This field represents the future reservation ++ // before an amendment was requested. If the amendment is declined, the ++ // Future Reservation will be reverted to the last known good state. The ++ // last known good state is not set when updating a future reservation ++ // whose Procurement Status is DRAFTING. ++ LastKnownGoodState *FutureReservationStatusLastKnownGoodState `json:"lastKnownGoodState,omitempty"` ++ + // LockTime: Time when Future Reservation would become LOCKED, after + // which no modifications to Future Reservation will be allowed. + // Applicable only after the Future Reservation is in the APPROVED +@@ -15082,6 +15329,9 @@ type FutureReservationStatus struct { + // fulfilled. Additional reservations were provided but did not reach + // total_count reserved instance slots. + // "FULFILLED" - Future reservation is fulfilled completely. ++ // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future ++ // Reservation has been requested. If the Amendment is declined, the ++ // Future Reservation will be restored to the last known good state. + // "PENDING_APPROVAL" - Future reservation is pending approval by GCP. + // "PROCUREMENT_STATUS_UNSPECIFIED" + // "PROCURING" - Future reservation is being procured by GCP. Beyond +@@ -15095,19 +15345,18 @@ type FutureReservationStatus struct { + + SpecificSkuProperties *FutureReservationStatusSpecificSKUProperties `json:"specificSkuProperties,omitempty"` + +- // ForceSendFields is a list of field names (e.g. +- // "AutoCreatedReservations") to unconditionally include in API +- // requests. By default, fields with empty or default values are omitted +- // from API requests. However, any non-pointer, non-interface field +- // appearing in ForceSendFields will be sent to the server regardless of +- // whether the field is empty or not. This may be used to include empty +- // fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. "AmendmentStatus") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "AutoCreatedReservations") +- // to include in API requests with the JSON null value. By default, +- // fields with empty values are omitted from API requests. However, any +- // field with an empty value appearing in NullFields will be sent to the ++ // NullFields is a list of field names (e.g. 
"AmendmentStatus") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. +@@ -15120,6 +15369,108 @@ func (s *FutureReservationStatus) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// FutureReservationStatusLastKnownGoodState: The state that the future ++// reservation will be reverted to should the amendment be declined. ++type FutureReservationStatusLastKnownGoodState struct { ++ // Description: The description of the FutureReservation before an ++ // amendment was requested. ++ Description string `json:"description,omitempty"` ++ ++ FutureReservationSpecs *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs `json:"futureReservationSpecs,omitempty"` ++ ++ // NamePrefix: The name prefix of the Future Reservation before an ++ // amendment was requested. ++ NamePrefix string `json:"namePrefix,omitempty"` ++ ++ // ProcurementStatus: The status of the last known good state for the ++ // Future Reservation. ++ // ++ // Possible values: ++ // "APPROVED" - Future reservation is approved by GCP. ++ // "CANCELLED" - Future reservation is cancelled by the customer. ++ // "COMMITTED" - Future reservation is committed by the customer. ++ // "DECLINED" - Future reservation is rejected by GCP. ++ // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions ++ // to PENDING_APPROVAL upon user submitting FR. ++ // "FAILED" - Future reservation failed. No additional reservations ++ // were provided. ++ // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially ++ // fulfilled. Additional reservations were provided but did not reach ++ // total_count reserved instance slots. ++ // "FULFILLED" - Future reservation is fulfilled completely. ++ // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future ++ // Reservation has been requested. If the Amendment is declined, the ++ // Future Reservation will be restored to the last known good state. ++ // "PENDING_APPROVAL" - Future reservation is pending approval by GCP. ++ // "PROCUREMENT_STATUS_UNSPECIFIED" ++ // "PROCURING" - Future reservation is being procured by GCP. Beyond ++ // this point, Future reservation is locked and no further modifications ++ // are allowed. ++ // "PROVISIONING" - Future reservation capacity is being provisioned. ++ // This state will be entered after start_time, while reservations are ++ // being created to provide total_count reserved instance slots. This ++ // state will not persist past start_time + 24h. ++ ProcurementStatus string `json:"procurementStatus,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. 
By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *FutureReservationStatusLastKnownGoodState) MarshalJSON() ([]byte, error) { ++ type NoMethod FutureReservationStatusLastKnownGoodState ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// FutureReservationStatusLastKnownGoodStateFutureReservationSpecs: The ++// properties of the last known good state for the Future Reservation. ++type FutureReservationStatusLastKnownGoodStateFutureReservationSpecs struct { ++ // ShareSettings: The previous share settings of the Future Reservation. ++ ShareSettings *ShareSettings `json:"shareSettings,omitempty"` ++ ++ // SpecificSkuProperties: The previous instance related properties of ++ // the Future Reservation. ++ SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"` ++ ++ // TimeWindow: The previous time window of the Future Reservation. ++ TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ShareSettings") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ShareSettings") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs) MarshalJSON() ([]byte, error) { ++ type NoMethod FutureReservationStatusLastKnownGoodStateFutureReservationSpecs ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // FutureReservationStatusSpecificSKUProperties: Properties to be set + // for the Future Reservation. + type FutureReservationStatusSpecificSKUProperties struct { +@@ -16211,8 +16562,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. 
+ // + // Possible values: + // "BARE_METAL_LINUX_COMPATIBLE" +@@ -16223,6 +16574,7 @@ type GuestOsFeature struct { + // "SEV_CAPABLE" + // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" ++ // "TDX_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" + // "WINDOWS" +@@ -16587,12 +16939,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/alpha/regionHealthChecks) Internal + // HTTP(S) load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -18074,7 +18426,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -20595,9 +20947,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -21468,6 +21820,11 @@ type InstanceGroupManager struct { + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + ++ // InstanceFlexibilityPolicy: Instance flexibility allowing MIG to ++ // create VMs from multiple types of machines. Instance flexibility ++ // configuration on MIG overrides instance template configuration. ++ InstanceFlexibilityPolicy *InstanceGroupManagerInstanceFlexibilityPolicy `json:"instanceFlexibilityPolicy,omitempty"` ++ + // InstanceGroup: [Output Only] The URL of the Instance Group resource. + InstanceGroup string `json:"instanceGroup,omitempty"` + +@@ -21548,6 +21905,13 @@ type InstanceGroupManager struct { + // Resizing the group also changes this number. + TargetSize int64 `json:"targetSize,omitempty"` + ++ // TargetSizeUnit: The unit of measure for the target size. ++ // ++ // Possible values: ++ // "VCPU" - TargetSize is the target count of vCPUs of VMs. ++ // "VM" - [Default] TargetSize is the target number of VMs. ++ TargetSizeUnit string `json:"targetSizeUnit,omitempty"` ++ + // TargetStoppedSize: The target number of stopped instances for this + // managed instance group. 
This number changes when you: - Stop instance + // using the stopInstances method or start instances using the +@@ -21949,13 +22313,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. + HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // MaxUnavailable: Maximum number of instances that can be unavailable +@@ -22038,6 +22403,70 @@ func (s *InstanceGroupManagerAutoHealingPolicyAutoHealingTriggers) MarshalJSON() + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstanceGroupManagerInstanceFlexibilityPolicy struct { ++ // InstanceSelectionLists: List of instance selection options that the ++ // group will use when creating new VMs. ++ InstanceSelectionLists map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelectionLists,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InstanceSelectionLists") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InstanceSelectionLists") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstanceGroupManagerInstanceFlexibilityPolicy) MarshalJSON() ([]byte, error) { ++ type NoMethod InstanceGroupManagerInstanceFlexibilityPolicy ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection struct { ++ // MachineTypes: Full machine-type names, e.g. "n1-standard-16". ++ MachineTypes []string `json:"machineTypes,omitempty"` ++ ++ // Rank: Preference of this instance selection. 
Lower number means ++ // higher preference. MIG will first try to create a VM based on the ++ // machine-type with lowest rank and fallback to next rank based on ++ // availability. Machine types and instance selections with the same ++ // rank have the same preference. ++ Rank int64 `json:"rank,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "MachineTypes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "MachineTypes") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection) MarshalJSON() ([]byte, error) { ++ type NoMethod InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstanceGroupManagerInstanceLifecyclePolicy struct { + // DefaultActionOnFailure: Defines behaviour for all instance or + // failures +@@ -23297,7 +23726,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -26178,9 +26609,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -26246,6 +26677,35 @@ func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesBulkInsertOperationMetadata struct { ++ // PerLocationStatus: Status information per location (location name is ++ // key). Example key: zones/us-central1-a ++ PerLocationStatus map[string]BulkInsertOperationStatus `json:"perLocationStatus,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "PerLocationStatus") ++ // to unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PerLocationStatus") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesBulkInsertOperationMetadata) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesBulkInsertOperationMetadata ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policies. + FirewallPolicys []*InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` +@@ -26751,6 +27211,42 @@ func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesSetSecurityPolicyRequest struct { ++ // NetworkInterfaces: The network interfaces that the security policy ++ // will be applied to. Network interfaces use the nicN naming format. ++ // You can only set a security policy for network interfaces with an ++ // access config. ++ NetworkInterfaces []string `json:"networkInterfaces,omitempty"` ++ ++ // SecurityPolicy: A full or partial URL to a security policy to add to ++ // this instance. If this field is set to an empty string it will remove ++ // the associated security policy. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NetworkInterfaces") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NetworkInterfaces") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesSetSecurityPolicyRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesSetServiceAccountRequest struct { + // Email: Email address of the service account. 
+ Email string `json:"email,omitempty"` +@@ -27654,9 +28150,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. +@@ -27665,6 +28161,16 @@ type Interconnect struct { + // set to true. + AdminEnabled bool `json:"adminEnabled,omitempty"` + ++ // AvailableFeatures: [Output only] List of features available for this ++ // interconnect, which can take one of the following values: - MACSEC If ++ // present then the interconnect was created on MACsec capable hardware ++ // ports. If not present then the interconnect is provisioned on ++ // non-MACsec capable ports and MACsec enablement will fail. ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ AvailableFeatures []string `json:"availableFeatures,omitempty"` ++ + // CircuitInfos: [Output Only] A list of CircuitInfo objects, that + // describe the individual circuits in this LAG. + CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` +@@ -27810,6 +28316,19 @@ type Interconnect struct { + // the interconnect is connected to. + RemoteLocation string `json:"remoteLocation,omitempty"` + ++ // RequestedFeatures: Optional. List of features requested for this ++ // interconnect, which can take one of the following values: - MACSEC If ++ // specified then the interconnect will be created on MACsec capable ++ // hardware ports. If not specified, the default value is false, which ++ // will allocate non-MACsec capable ports first if available. This ++ // parameter can only be provided during interconnect INSERT and cannot ++ // be changed using interconnect PATCH. Please review Interconnect ++ // Pricing for implications on enabling this flag. ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ RequestedFeatures []string `json:"requestedFeatures,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -28026,8 +28545,7 @@ type InterconnectAttachment struct { + // attachment. If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always +@@ -29490,6 +30008,24 @@ type InterconnectLocation struct { + // zone. Example: "zone1" or "zone2". 
+ AvailabilityZone string `json:"availabilityZone,omitempty"` + ++ // AvailableFeatures: [Output only] List of features available at this ++ // interconnect location, which can take one of the following values: - ++ // MACSEC ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ AvailableFeatures []string `json:"availableFeatures,omitempty"` ++ ++ // AvailableLinkTypes: [Output only] List of link types available at ++ // this interconnect location, which can take one of the following ++ // values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR ++ // ++ // Possible values: ++ // "LINK_TYPE_ETHERNET_100G_LR" - 100G Ethernet, LR Optics. ++ // "LINK_TYPE_ETHERNET_10G_LR" - 10G Ethernet, LR Optics. [(rate_bps) ++ // = 10000000000]; ++ AvailableLinkTypes []string `json:"availableLinkTypes,omitempty"` ++ + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". +@@ -30097,7 +30633,7 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// InterconnectRemoteLocation: Represents an Cross-Cloud Interconnect ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect + // Remote Location resource. You can use this resource to find remote + // location details about an Interconnect attachment (VLAN). + type InterconnectRemoteLocation struct { +@@ -30161,8 +30697,8 @@ type InterconnectRemoteLocation struct { + // Possible values: + // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled + // by default on the Cross-Cloud Interconnect. +- // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and +- // will not be enabled on this port. GetDiagnostics will show ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows + // bundleAggregationType as "static". GCP does not support LAGs without + // LACP, so requestedLinkCount must be 1. + Lacp string `json:"lacp,omitempty"` +@@ -30250,7 +30786,7 @@ type InterconnectRemoteLocationConstraints struct { + // incompatible with their cloud provider. Specifically, when ordering a + // redundant pair of Cross-Cloud Interconnect ports, and one of them + // uses a remote location with portPairMatchingRemoteLocation set to +- // matching, the UI will require that both ports use the same remote ++ // matching, the UI requires that both ports use the same remote + // location. + // + // Possible values: +@@ -30357,6 +30893,374 @@ type InterconnectRemoteLocationList struct { + // remote locations. + Kind string `json:"kind,omitempty"` + ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. 
++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. 
++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. 
"Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectsGetDiagnosticsResponse: Response for the ++// InterconnectsGetDiagnosticsRequest. ++type InterconnectsGetDiagnosticsResponse struct { ++ Result *InterconnectDiagnostics `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Result") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. 
"Result") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectsGetDiagnosticsResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectsGetMacsecConfigResponse: Response for the ++// InterconnectsGetMacsecConfigRequest. ++type InterconnectsGetMacsecConfigResponse struct { ++ // Etag: end_interface: MixerGetResponseWithEtagBuilder ++ Etag string `json:"etag,omitempty"` ++ ++ Result *InterconnectMacsecConfig `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Etag") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Etag") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectsGetMacsecConfigResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InternalIpAddress struct { ++ // Cidr: IP CIDR address or range. ++ Cidr string `json:"cidr,omitempty"` ++ ++ // Owner: The owner of the internal IP address. ++ Owner string `json:"owner,omitempty"` ++ ++ // Purpose: The purpose of the internal IP address if applicable. ++ Purpose string `json:"purpose,omitempty"` ++ ++ // Region: The region of the internal IP address if applicable. ++ Region string `json:"region,omitempty"` ++ ++ // Type: The type of the internal IP address. ++ // ++ // Possible values: ++ // "PEER_RESERVED" - Reserved IP ranges on peer networks. ++ // "PEER_USED" - Used IP ranges on peer networks, including peer ++ // subnetwork IP ranges. ++ // "REMOTE_RESERVED" - Reserved IP ranges on peer networks of peer ++ // networks. ++ // "REMOTE_USED" - Used IP ranges on peer networks of peer networks. ++ // "RESERVED" - Reserved IP ranges on local network. ++ // "SUBNETWORK" - Subnetwork IP ranges on local network. ++ // "TYPE_UNSPECIFIED" ++ Type string `json:"type,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Cidr") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Cidr") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InternalIpAddress) MarshalJSON() ([]byte, error) { ++ type NoMethod InternalIpAddress ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InternalIpOwner struct { ++ // IpCidrRange: IP CIDR range being owned. ++ IpCidrRange string `json:"ipCidrRange,omitempty"` ++ ++ // Owners: URLs of the IP owners of the IP CIDR range. ++ Owners []string `json:"owners,omitempty"` ++ ++ // SystemOwned: Whether this IP CIDR range is reserved for system use. ++ SystemOwned bool `json:"systemOwned,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "IpCidrRange") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "IpCidrRange") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InternalIpOwner) MarshalJSON() ([]byte, error) { ++ type NoMethod InternalIpOwner ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type IpAddressesList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InternalIpAddress resources. ++ Items []*InternalIpAddress `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always compute#ipAddressesList ++ // for IP addresses lists. ++ Kind string `json:"kind,omitempty"` ++ + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query +@@ -30369,376 +31273,7 @@ type InterconnectRemoteLocationList struct { + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. 
"Id") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Id") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationList +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectRemoteLocationListWarning: [Output Only] Informational +-// warning message. +-type InterconnectRemoteLocationListWarning struct { +- // Code: [Output Only] A warning code, if applicable. For example, +- // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in +- // the response. +- // +- // Possible values: +- // "CLEANUP_FAILED" - Warning about failed cleanup of transient +- // changes made by a failed operation. +- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was +- // created. +- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as deprecated +- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk +- // that is larger than image size. +- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as experimental +- // "EXTERNAL_API_WARNING" - Warning that is present in an external api +- // call +- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been +- // overridden. Deprecated unused field. +- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an +- // injected kernel, which is deprecated. +- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV +- // backend service is associated with a health check that is not of type +- // HTTP/HTTPS/HTTP2. +- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a +- // exceedingly large number of resources +- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type +- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is +- // not assigned to an instance on the network. +- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot +- // ip forward. +- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's +- // nextHopInstance URL refers to an instance that does not have an ipv6 +- // interface on the same network as the route. +- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL +- // refers to an instance that does not exist. +- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance +- // URL refers to an instance that is not on the same network as the +- // route. +- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not +- // have a status of RUNNING. +- // "NOT_CRITICAL_ERROR" - Error which is not critical. 
We decided to +- // continue the process despite the mentioned error. +- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list +- // page. +- // "PARTIAL_SUCCESS" - Success is reported, but some results may be +- // missing due to errors +- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource +- // that requires a TOS they have not accepted. +- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a +- // resource is in use. +- // "RESOURCE_NOT_DELETED" - One or more of the resources set to +- // auto-delete could not be deleted because they were in use. +- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is +- // ignored. +- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in +- // instance group manager is valid as such, but its application does not +- // make a lot of sense, because it allows only single instance in +- // instance group. +- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema +- // are present +- // "UNREACHABLE" - A given scope cannot be reached. +- Code string `json:"code,omitempty"` +- +- // Data: [Output Only] Metadata about this warning in key: value format. +- // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" +- // } +- Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` +- +- // Message: [Output Only] A human-readable description of the warning +- // code. +- Message string `json:"message,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Code") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Code") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationListWarning +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InterconnectRemoteLocationListWarningData struct { +- // Key: [Output Only] A key that provides more detail on the warning +- // being returned. For example, for warnings where there are no results +- // in a list request for a particular zone, this key might be scope and +- // the key value might be the zone name. Other examples might be a key +- // indicating a deprecated resource and a suggested replacement, or a +- // warning about invalid network settings (for example, if an instance +- // attempts to perform IP forwarding but is not enabled for IP +- // forwarding). +- Key string `json:"key,omitempty"` +- +- // Value: [Output Only] A warning data value corresponding to the key. +- Value string `json:"value,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Key") to +- // unconditionally include in API requests. 
By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Key") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationListWarningData +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InterconnectRemoteLocationPermittedConnections struct { +- // InterconnectLocation: [Output Only] URL of an Interconnect location +- // that is permitted to connect to this Interconnect remote location. +- InterconnectLocation string `json:"interconnectLocation,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. +- // "InterconnectLocation") to unconditionally include in API requests. +- // By default, fields with empty or default values are omitted from API +- // requests. However, any non-pointer, non-interface field appearing in +- // ForceSendFields will be sent to the server regardless of whether the +- // field is empty or not. This may be used to include empty fields in +- // Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "InterconnectLocation") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationPermittedConnections +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectsGetDiagnosticsResponse: Response for the +-// InterconnectsGetDiagnosticsRequest. +-type InterconnectsGetDiagnosticsResponse struct { +- Result *InterconnectDiagnostics `json:"result,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Result") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Result") to include in API +- // requests with the JSON null value. 
By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectsGetDiagnosticsResponse +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectsGetMacsecConfigResponse: Response for the +-// InterconnectsGetMacsecConfigRequest. +-type InterconnectsGetMacsecConfigResponse struct { +- // Etag: end_interface: MixerGetResponseWithEtagBuilder +- Etag string `json:"etag,omitempty"` +- +- Result *InterconnectMacsecConfig `json:"result,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Etag") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Etag") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectsGetMacsecConfigResponse +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InternalIpAddress struct { +- // Cidr: IP CIDR address or range. +- Cidr string `json:"cidr,omitempty"` +- +- // Owner: The owner of the internal IP address. +- Owner string `json:"owner,omitempty"` +- +- // Purpose: The purpose of the internal IP address if applicable. +- Purpose string `json:"purpose,omitempty"` +- +- // Region: The region of the internal IP address if applicable. +- Region string `json:"region,omitempty"` +- +- // Type: The type of the internal IP address. +- // +- // Possible values: +- // "PEER_RESERVED" - Reserved IP ranges on peer networks. +- // "PEER_USED" - Used IP ranges on peer networks, including peer +- // subnetwork IP ranges. +- // "REMOTE_RESERVED" - Reserved IP ranges on peer networks of peer +- // networks. +- // "REMOTE_USED" - Used IP ranges on peer networks of peer networks. +- // "RESERVED" - Reserved IP ranges on local network. +- // "SUBNETWORK" - Subnetwork IP ranges on local network. +- // "TYPE_UNSPECIFIED" +- Type string `json:"type,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Cidr") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. 
However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Cidr") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InternalIpAddress) MarshalJSON() ([]byte, error) { +- type NoMethod InternalIpAddress +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InternalIpOwner struct { +- // IpCidrRange: IP CIDR range being owned. +- IpCidrRange string `json:"ipCidrRange,omitempty"` +- +- // Owners: URLs of the IP owners of the IP CIDR range. +- Owners []string `json:"owners,omitempty"` +- +- // SystemOwned: Whether this IP CIDR range is reserved for system use. +- SystemOwned bool `json:"systemOwned,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "IpCidrRange") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "IpCidrRange") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InternalIpOwner) MarshalJSON() ([]byte, error) { +- type NoMethod InternalIpOwner +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type IpAddressesList struct { +- // Id: [Output Only] Unique identifier for the resource; defined by the +- // server. +- Id string `json:"id,omitempty"` +- +- // Items: A list of InternalIpAddress resources. +- Items []*InternalIpAddress `json:"items,omitempty"` +- +- // Kind: [Output Only] Type of resource. Always compute#ipAddressesList +- // for IP addresses lists. +- Kind string `json:"kind,omitempty"` +- +- // NextPageToken: [Output Only] This token allows you to get the next +- // page of results for list requests. If the number of results is larger +- // than maxResults, use the nextPageToken as a value for the query +- // parameter pageToken in the next list request. Subsequent list +- // requests will have their own nextPageToken to continue paging through +- // the results. +- NextPageToken string `json:"nextPageToken,omitempty"` +- +- // SelfLink: [Output Only] Server-defined URL for this resource. +- SelfLink string `json:"selfLink,omitempty"` +- +- // Warning: [Output Only] Informational warning message. 
+- Warning *IpAddressesListWarning `json:"warning,omitempty"` ++ Warning *IpAddressesListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. +@@ -32422,7 +32957,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -33084,9 +33619,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -33630,6 +34165,46 @@ func (s *MetadataFilterLabelMatch) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// Money: Represents an amount of money with its currency type. ++type Money struct { ++ // CurrencyCode: The three-letter currency code defined in ISO 4217. ++ CurrencyCode string `json:"currencyCode,omitempty"` ++ ++ // Nanos: Number of nano (10^-9) units of the amount. The value must be ++ // between -999,999,999 and +999,999,999 inclusive. If `units` is ++ // positive, `nanos` must be positive or zero. If `units` is zero, ++ // `nanos` can be positive, zero, or negative. If `units` is negative, ++ // `nanos` must be negative or zero. For example $-1.75 is represented ++ // as `units`=-1 and `nanos`=-750,000,000. ++ Nanos int64 `json:"nanos,omitempty"` ++ ++ // Units: The whole units of the amount. For example if `currencyCode` ++ // is "USD", then 1 unit is one US dollar. ++ Units int64 `json:"units,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. "CurrencyCode") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CurrencyCode") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *Money) MarshalJSON() ([]byte, error) { ++ type NoMethod Money ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // MutualTls: [Deprecated] Configuration for the mutual Tls mode for + // peer authentication. Configuration for the mutual Tls mode for peer + // authentication. 
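For readers of this vendored diff: the Money message added in the hunk above stores an amount as whole `units` plus a `nanos` remainder, and its doc comment requires the two to carry the same sign (for example, $-1.75 is `units`=-1 and `nanos`=-750,000,000). A minimal, self-contained Go sketch of that split follows; the toMoney helper and the nano-unit integer input are illustrative assumptions and are not part of the vendored API or of this patch.

package main

import "fmt"

// toMoney converts an amount expressed in nano-units (10^-9 of the
// currency) into whole units and a nanos remainder. Go's integer
// division and remainder both truncate toward zero, so the two results
// naturally share a sign, matching the rule in the Money doc comment.
func toMoney(nanoUnits int64) (units, nanos int64) {
	units = nanoUnits / 1_000_000_000
	nanos = nanoUnits % 1_000_000_000
	return units, nanos
}

func main() {
	// -1.75 USD expressed in nano-units.
	units, nanos := toMoney(-1_750_000_000)
	fmt.Println(units, nanos) // prints: -1 -750000000
}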
+@@ -33703,6 +34278,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage ++// status, mode). ++type NatIpInfo struct { ++ // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. ++ NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` ++ ++ // NatName: Name of the NAT config which the NAT IP belongs to. ++ NatName string `json:"natName,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NatIpInfoMappings") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfo) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfo ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. ++type NatIpInfoNatIpInfoMapping struct { ++ // Mode: Specifies whether NAT IP is auto or manual. ++ // ++ // Possible values: ++ // "AUTO" ++ // "MANUAL" ++ Mode string `json:"mode,omitempty"` ++ ++ // NatIp: NAT IP address. For example: 203.0.113.11. ++ NatIp string `json:"natIp,omitempty"` ++ ++ // Usage: Specifies whether NAT IP is currently serving at least one ++ // endpoint or not. ++ // ++ // Possible values: ++ // "IN_USE" ++ // "UNUSED" ++ Usage string `json:"usage,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Mode") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Mode") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfoNatIpInfoMapping ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type NatIpInfoResponse struct { ++ // Result: [Output Only] A list of NAT IP information. 
++ Result []*NatIpInfo `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Result") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Result") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfoResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // Network: Represents a VPC Network resource. Networks connect + // resources to each other and to the internet. For more information, + // read Virtual Private Cloud (VPC) Network. +@@ -33740,7 +34422,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -33858,10 +34540,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -33881,7 +34562,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -34132,7 +34817,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. 
+ type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -34140,7 +34825,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -36487,10 +37172,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -40437,6 +41123,8 @@ type Operation struct { + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + ++ InstancesBulkInsertOperationMetadata *InstancesBulkInsertOperationMetadata `json:"instancesBulkInsertOperationMetadata,omitempty"` ++ + // Kind: [Output Only] Type of the resource. Always `compute#operation` + // for Operation resources. + Kind string `json:"kind,omitempty"` +@@ -45532,11 +46220,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -45551,6 +46243,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -45578,6 +46271,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -50577,8 +51271,6 @@ type ResourceStatus struct { + // instance.serviceIntegrationSpecs. + ServiceIntegrationStatuses map[string]ResourceStatusServiceIntegrationStatus `json:"serviceIntegrationStatuses,omitempty"` + +- UpcomingMaintenance *ResourceStatusUpcomingMaintenance `json:"upcomingMaintenance,omitempty"` +- + // ForceSendFields is a list of field names (e.g. "PhysicalHost") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -50720,34 +51412,6 @@ func (s *ResourceStatusServiceIntegrationStatusBackupDRStatus) MarshalJSON() ([] + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-type ResourceStatusUpcomingMaintenance struct { +- // CanReschedule: Indicates if the maintenance can be customer +- // triggered. See go/sf-ctm-design for more details +- CanReschedule bool `json:"canReschedule,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "CanReschedule") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "CanReschedule") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *ResourceStatusUpcomingMaintenance) MarshalJSON() ([]byte, error) { +- type NoMethod ResourceStatusUpcomingMaintenance +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- + // RolloutPolicy: A rollout policy configuration. + type RolloutPolicy struct { + // DefaultRolloutTime: An optional RFC3339 timestamp on or after which +@@ -51771,14 +52435,15 @@ type RouterBgpPeer struct { + // Bfd: BFD configuration for the BGP peering. + Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` + +- // CustomLearnedIpRanges: User-defined Custom Learned Route IP range +- // list for a BGP session. ++ // CustomLearnedIpRanges: A list of user-defined custom learned route IP ++ // address ranges for a BGP session. + CustomLearnedIpRanges []*RouterBgpPeerCustomLearnedIpRange `json:"customLearnedIpRanges,omitempty"` + +- // CustomLearnedRoutePriority: User-defined Custom Learned Route +- // Priority for a BGP session. This will be applied to all Custom +- // Learned Route ranges of the BGP session, if not given, google-managed +- // priority of 100 is used. ++ // CustomLearnedRoutePriority: The user-defined custom learned route ++ // priority for a BGP session. This value is applied to all custom ++ // learned route ranges for the session. You can choose a value from `0` ++ // to `65335`. If you don't provide a value, Google Cloud assigns a ++ // priority of `100` to the ranges. + CustomLearnedRoutePriority int64 `json:"customLearnedRoutePriority,omitempty"` + + // Enable: The status of the BGP peer connection. If set to FALSE, any +@@ -51978,10 +52643,10 @@ func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { + } + + type RouterBgpPeerCustomLearnedIpRange struct { +- // Range: The Custom Learned Route IP range. Must be a valid +- // CIDR-formatted prefix. If an IP is provided without a subnet mask, it +- // is interpreted as a /32 singular IP range for IPv4, and /128 for +- // IPv6. ++ // Range: The custom learned route IP address range. Must be a valid ++ // CIDR-formatted prefix. 
If an IP address is provided without a subnet ++ // mask, it is interpreted as, for IPv4, a `/32` singular IP address ++ // range, and, for IPv6, `/128`. + Range string `json:"range,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Range") to +@@ -52438,10 +53103,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -54461,13 +55125,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig) UnmarshalJSON(d + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -54505,6 +55173,7 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // Possible values: + // "DISABLED" + // "STANDARD" ++ // "STANDARD_WITH_GRAPHQL" + JsonParsing string `json:"jsonParsing,omitempty"` + + // Possible values: +@@ -54512,6 +55181,10 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // "VERBOSE" + LogLevel string `json:"logLevel,omitempty"` + ++ // UserIpRequestHeaders: An optional list of case-insensitive request ++ // header names to use for resolving the callers client IP address. ++ UserIpRequestHeaders []string `json:"userIpRequestHeaders,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -54867,7 +55540,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"RedirectSiteKey") to +@@ -54935,10 +55609,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -54962,7 +55637,8 @@ type SecurityPolicyRule struct { + EnableLogging bool `json:"enableLogging,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -55018,7 +55694,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // RedirectTarget: This must be specified for redirect actions. Cannot +@@ -55151,9 +55828,19 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + ++ // ExprOptions: The configuration options available when specifying a ++ // user defined CEVAL expression (i.e., 'expr'). ++ ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` ++ + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must +@@ -55306,6 +55993,73 @@ func (s *SecurityPolicyRuleMatcherConfigLayer4Config) MarshalJSON() ([]byte, err + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleMatcherExprOptions struct { ++ // RecaptchaOptions: reCAPTCHA configuration options to be applied for ++ // the rule. If the rule does not evaluate reCAPTCHA tokens, this field ++ // will have no effect. 
++ RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "RecaptchaOptions") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { ++ // ActionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA action-tokens. The provided site keys need to ++ // be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` ++ ++ // SessionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA session-tokens. The provided site keys need ++ // to be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // SecurityPolicyRuleNetworkMatcher: Represents a match condition that + // incoming network traffic is evaluated against. 
+ type SecurityPolicyRuleNetworkMatcher struct { +@@ -55593,7 +56347,8 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedActionRpcStatus: Specified gRPC response status for proxyless +@@ -55602,7 +56357,8 @@ type SecurityPolicyRuleRateLimitOptions struct { + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -55881,7 +56637,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // ClientTlsSettings: [Deprecated] TLS Settings for the backend service. +@@ -55899,8 +56655,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authentication") to +@@ -56097,7 +56852,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -56182,6 +56937,18 @@ type ServiceAttachment struct { + // the PSC service attachment. + PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. 
For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not +@@ -56918,8 +57685,7 @@ func (s *ServiceIntegrationSpecBackupDRSpec) MarshalJSON() ([]byte, error) { + + // SetCommonInstanceMetadataOperationMetadata: Encapsulates partial + // completion metadata for SetCommonInstanceMetadata. Will be propagated +-// on Operation.metadata as per go/partial-completion-api-clean. See +-// go/gce-aips/2822 for API council results. ++// on Operation.metadata. + type SetCommonInstanceMetadataOperationMetadata struct { + ClientOperationId string `json:"clientOperationId,omitempty"` + +@@ -57478,6 +58244,11 @@ type Snapshot struct { + // snapshot to a disk. + DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + ++ // EnableConfidentialCompute: Whether this snapshot is created from a ++ // confidential compute mode disk. [Output Only]: This field is not set ++ // by user, but from source disk. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // GuestFlush: [Input Only] Whether to attempt an application consistent + // snapshot by informing the OS to prepare for the snapshot process. + GuestFlush bool `json:"guestFlush,omitempty"` +@@ -57574,6 +58345,10 @@ type Snapshot struct { + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + ++ // SourceDiskForRecoveryCheckpoint: The source disk whose recovery ++ // checkpoint will be used to create this snapshot. ++ SourceDiskForRecoveryCheckpoint string `json:"sourceDiskForRecoveryCheckpoint,omitempty"` ++ + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given +@@ -59851,278 +60626,94 @@ func (s *Status) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// Subnetwork: Represents a Subnetwork resource. A subnetwork (also +-// known as a subnet) is a logical partition of a Virtual Private Cloud +-// network with one primary IP range and zero or more secondary IP +-// ranges. For more information, read Virtual Private Cloud (VPC) +-// Network. +-type Subnetwork struct { +- // AggregationInterval: Can only be specified if VPC flow logging for +- // this subnetwork is enabled. Sets the aggregation interval for +- // collecting flow logs. Increasing the interval time reduces the amount +- // of generated flow logs for long-lasting connections. Default is an +- // interval of 5 seconds per connection. Valid values: INTERVAL_5_SEC, +- // INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, +- // INTERVAL_15_MIN. +- // +- // Possible values: +- // "INTERVAL_10_MIN" +- // "INTERVAL_15_MIN" +- // "INTERVAL_1_MIN" +- // "INTERVAL_30_SEC" +- // "INTERVAL_5_MIN" +- // "INTERVAL_5_SEC" +- AggregationInterval string `json:"aggregationInterval,omitempty"` +- +- // AllowSubnetCidrRoutesOverlap: Whether this subnetwork's ranges can +- // conflict with existing static routes. 
Setting this to true allows +- // this subnetwork's primary and secondary ranges to overlap with (and +- // contain) static routes that have already been configured on the +- // corresponding network. For example if a static route has range +- // 10.1.0.0/16, a subnet range 10.0.0.0/8 could only be created if +- // allow_conflicting_routes=true. Overlapping is only allowed on +- // subnetwork operations; routes whose ranges conflict with this +- // subnetwork's ranges won't be allowed unless +- // route.allow_conflicting_subnetworks is set to true. Typically packets +- // destined to IPs within the subnetwork (which may contain +- // private/sensitive data) are prevented from leaving the virtual +- // network. Setting this field to true will disable this feature. The +- // default value is false and applies to all existing subnetworks and +- // automatically created subnetworks. This field cannot be set to true +- // at resource creation time. +- AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` +- ++// StoragePool: Represents a zonal storage pool resource. ++type StoragePool struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this +- // property when you create the resource. This field can be set only at +- // resource creation time. ++ // property when you create the resource. + Description string `json:"description,omitempty"` + +- // EnableFlowLogs: Whether to enable flow logging for this subnetwork. +- // If this field is not explicitly set, it will not appear in get +- // listings. If not set the default behavior is determined by the org +- // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. +- EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` +- +- // EnableL2: Enables Layer2 communication on the subnetwork. +- EnableL2 bool `json:"enableL2,omitempty"` +- +- // EnablePrivateV6Access: Deprecated in favor of enable in +- // PrivateIpv6GoogleAccess. Whether the VMs in this subnet can directly +- // access Google services via internal IPv6 addresses. This field can be +- // both set at resource creation time and updated using patch. +- EnablePrivateV6Access bool `json:"enablePrivateV6Access,omitempty"` +- +- // ExternalIpv6Prefix: The external IPv6 address range that is owned by +- // this subnetwork. +- ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` +- +- // Fingerprint: Fingerprint of this resource. A hash of the contents +- // stored in this object. This field is used in optimistic locking. This +- // field will be ignored when inserting a Subnetwork. An up-to-date +- // fingerprint must be provided in order to update the Subnetwork, +- // otherwise the request will fail with error 412 conditionNotMet. To +- // see the latest fingerprint, make a get() request to retrieve a +- // Subnetwork. +- Fingerprint string `json:"fingerprint,omitempty"` +- +- // FlowSampling: Can only be specified if VPC flow logging for this +- // subnetwork is enabled. The value of the field must be in [0, 1]. Set +- // the sampling rate of VPC flow logs within the subnetwork where 1.0 +- // means all collected logs are reported and 0.0 means no logs are +- // reported. 
Default is 0.5 unless otherwise specified by the org +- // policy, which means half of all collected logs are reported. +- FlowSampling float64 `json:"flowSampling,omitempty"` +- +- // GatewayAddress: [Output Only] The gateway address for default routes +- // to reach destination addresses outside this subnetwork. +- GatewayAddress string `json:"gatewayAddress,omitempty"` +- + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + +- // InternalIpv6Prefix: [Output Only] The internal IPv6 address range +- // that is assigned to this subnetwork. +- InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` +- +- // IpCidrRange: The range of internal addresses that are owned by this +- // subnetwork. Provide this property when you create the subnetwork. For +- // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and +- // non-overlapping within a network. Only IPv4 is supported. This field +- // is set at resource creation time. The range can be any range listed +- // in the Valid ranges list. The range can be expanded after creation +- // using expandIpCidrRange. +- IpCidrRange string `json:"ipCidrRange,omitempty"` +- +- // Ipv6AccessType: The access type of IPv6 address this subnet holds. +- // It's immutable and can only be specified during creation or the first +- // time the subnet is updated into IPV4_IPV6 dual stack. +- // +- // Possible values: +- // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses +- // that are accessible via the Internet, as well as the VPC network. +- // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses +- // that are only accessible over the VPC network. +- Ipv6AccessType string `json:"ipv6AccessType,omitempty"` +- +- // Ipv6CidrRange: [Output Only] This field is for internal use. +- Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` +- +- // Kind: [Output Only] Type of the resource. Always compute#subnetwork +- // for Subnetwork resources. ++ // Kind: [Output Only] Type of the resource. Always compute#storagePool ++ // for storage pools. + Kind string `json:"kind,omitempty"` + +- // LogConfig: This field denotes the VPC flow logging options for this +- // subnetwork. If logging is enabled, logs are exported to Cloud +- // Logging. +- LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // storage pool, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve a storage pool. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` + +- // Metadata: Can only be specified if VPC flow logging for this +- // subnetwork is enabled. Configures whether metadata fields should be +- // added to the reported VPC flow logs. Options are +- // INCLUDE_ALL_METADATA, EXCLUDE_ALL_METADATA, and CUSTOM_METADATA. +- // Default is EXCLUDE_ALL_METADATA. +- // +- // Possible values: +- // "EXCLUDE_ALL_METADATA" +- // "INCLUDE_ALL_METADATA" +- Metadata string `json:"metadata,omitempty"` ++ // Labels: Labels to apply to this storage pool. 
These can be later ++ // modified by the setLabels method. ++ Labels map[string]string `json:"labels,omitempty"` + +- // Name: The name of the resource, provided by the client when initially +- // creating the resource. The name must be 1-63 characters long, and +- // comply with RFC1035. Specifically, the name must be 1-63 characters +- // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` +- // which means the first character must be a lowercase letter, and all +- // following characters must be a dash, lowercase letter, or digit, +- // except the last character, which cannot be a dash. ++ // Name: Name of the resource. Provided by the client when the resource ++ // is created. The name must be 1-63 characters long, and comply with ++ // RFC1035. Specifically, the name must be 1-63 characters long and ++ // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means ++ // the first character must be a lowercase letter, and all following ++ // characters must be a dash, lowercase letter, or digit, except the ++ // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + +- // Network: The URL of the network to which this subnetwork belongs, +- // provided by the client when initially creating the subnetwork. This +- // field can be set only at resource creation time. +- Network string `json:"network,omitempty"` +- +- // PrivateIpGoogleAccess: Whether the VMs in this subnet can access +- // Google services without assigned external IP addresses. This field +- // can be both set at resource creation time and updated using +- // setPrivateIpGoogleAccess. +- PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` +- +- // PrivateIpv6GoogleAccess: This field is for internal use. This field +- // can be both set at resource creation time and updated using patch. +- // +- // Possible values: +- // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from +- // Google services. +- // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private +- // IPv6 access to/from Google services. +- // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 +- // access from VMs in this subnet to Google services. +- PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` +- +- // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. +- // +- // Possible values: +- // "AGGREGATE" - Subnetwork used to aggregate multiple private +- // subnetworks. +- // "CLOUD_EXTENSION" - Subnetworks created for Cloud Extension +- // Machines. +- // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal +- // HTTP(S) Load Balancing. +- // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +- // HTTP(S) Load Balancing. +- // "PRIVATE" - Regular user created or automatically created subnet. +- // "PRIVATE_NAT" - Subnetwork used as source range for Private NAT +- // Gateways. +- // "PRIVATE_RFC_1918" - Regular user created or automatically created +- // subnet. +- // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service +- // Connect in the producer network. 
+- // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional +- // Internal/External HTTP(S) Load Balancing. +- Purpose string `json:"purpose,omitempty"` +- +- // Region: URL of the region where the Subnetwork resides. This field +- // can be set only at resource creation time. +- Region string `json:"region,omitempty"` +- +- // ReservedInternalRange: The URL of the reserved internal range. +- ReservedInternalRange string `json:"reservedInternalRange,omitempty"` +- +- // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one +- // that is ready to be promoted to ACTIVE or is currently draining. This +- // field can be updated with a patch request. +- // +- // Possible values: +- // "ACTIVE" - The ACTIVE subnet that is currently used. +- // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. +- Role string `json:"role,omitempty"` ++ // ProvisionedIops: Provsioned IOPS of the storage pool. ++ ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + +- // SecondaryIpRanges: An array of configurations for secondary IP ranges +- // for VM instances contained in this subnetwork. The primary IP of such +- // VM must belong to the primary ipCidrRange of the subnetwork. The +- // alias IPs may belong to either primary or secondary ranges. This +- // field can be updated with a patch request. +- SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` ++ // ResourceStatus: [Output Only] Status information for the storage pool ++ // resource. ++ ResourceStatus *StoragePoolResourceStatus `json:"resourceStatus,omitempty"` + +- // SelfLink: [Output Only] Server-defined URL for the resource. ++ // SelfLink: [Output Only] Server-defined fully-qualified URL for this ++ // resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SelfLinkWithId: [Output Only] Server-defined URL for this resource +- // with the resource id. ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource's ++ // resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + +- // StackType: The stack type for the subnet. If set to IPV4_ONLY, new +- // VMs in the subnet are assigned IPv4 addresses only. If set to +- // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 +- // addresses. If not specified, IPV4_ONLY is used. This field can be +- // both set at resource creation time and updated using patch. ++ // SizeGb: Size, in GiB, of the storage pool. ++ SizeGb int64 `json:"sizeGb,omitempty,string"` ++ ++ // State: [Output Only] The status of storage pool creation. - CREATING: ++ // Storage pool is provisioning. storagePool. - FAILED: Storage pool ++ // creation failed. - READY: Storage pool is ready for use. - DELETING: ++ // Storage pool is deleting. + // + // Possible values: +- // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 +- // addresses. +- // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 +- // addresses. +- StackType string `json:"stackType,omitempty"` ++ // "CREATING" - StoragePool is provisioning ++ // "DELETING" - StoragePool is deleting. ++ // "FAILED" - StoragePool creation failed. ++ // "READY" - StoragePool is ready for use. 
++ State string `json:"state,omitempty"` + +- // State: [Output Only] The state of the subnetwork, which can be one of +- // the following values: READY: Subnetwork is created and ready to use +- // DRAINING: only applicable to subnetworks that have the purpose set to +- // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the +- // load balancer are being drained. A subnetwork that is draining cannot +- // be used or modified until it reaches a status of READY ++ // Type: Type of the storage pool + // + // Possible values: +- // "DRAINING" - Subnetwork is being drained. +- // "READY" - Subnetwork is ready for use. +- State string `json:"state,omitempty"` ++ // "SSD" ++ // "UNSPECIFIED" ++ Type string `json:"type,omitempty"` + +- // Vlans: A repeated field indicating the VLAN IDs supported on this +- // subnetwork. During Subnet creation, specifying vlan is valid only if +- // enable_l2 is true. During Subnet Update, specifying vlan is allowed +- // only for l2 enabled subnets. Restricted to only one VLAN. +- Vlans []int64 `json:"vlans,omitempty"` ++ // Zone: [Output Only] URL of the zone where the storage pool resides. ++ // You must specify this field as part of the HTTP request URL. It is ++ // not settable as a field in the request body. ++ Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + +- // ForceSendFields is a list of field names (e.g. "AggregationInterval") ++ // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -60130,7 +60721,7 @@ type Subnetwork struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "AggregationInterval") to ++ // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the +@@ -60140,36 +60731,25 @@ type Subnetwork struct { + NullFields []string `json:"-"` + } + +-func (s *Subnetwork) MarshalJSON() ([]byte, error) { +- type NoMethod Subnetwork ++func (s *StoragePool) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePool + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-func (s *Subnetwork) UnmarshalJSON(data []byte) error { +- type NoMethod Subnetwork +- var s1 struct { +- FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` +- *NoMethod +- } +- s1.NoMethod = (*NoMethod)(s) +- if err := json.Unmarshal(data, &s1); err != nil { +- return err +- } +- s.FlowSampling = float64(s1.FlowSampling) +- return nil +-} ++type StoragePoolAggregatedList struct { ++ Etag string `json:"etag,omitempty"` + +-type SubnetworkAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + +- // Items: A list of SubnetworksScopedList resources. +- Items map[string]SubnetworksScopedList `json:"items,omitempty"` ++ // Items: A list of StoragePoolsScopedList resources. 
++ Items map[string]StoragePoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always +- // compute#subnetworkAggregatedList for aggregated lists of subnetworks. ++ // compute#storagePoolAggregatedList for aggregated lists of storage ++ // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next +@@ -60187,13 +60767,13 @@ type SubnetworkAggregatedList struct { + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` ++ Warning *StoragePoolAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + +- // ForceSendFields is a list of field names (e.g. "Id") to ++ // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -60201,7 +60781,7 @@ type SubnetworkAggregatedList struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Id") to include in API ++ // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as +@@ -60210,15 +60790,942 @@ type SubnetworkAggregatedList struct { + NullFields []string `json:"-"` + } + +-func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { +- type NoMethod SubnetworkAggregatedList ++func (s *StoragePoolAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// SubnetworkAggregatedListWarning: [Output Only] Informational warning ++// StoragePoolAggregatedListWarning: [Output Only] Informational warning + // message. +-type SubnetworkAggregatedListWarning struct { ++type StoragePoolAggregatedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. 
++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolAggregatedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. 
++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolAggregatedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolAggregatedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolList: A list of StoragePool resources. ++type StoragePoolList struct { ++ Etag string `json:"etag,omitempty"` ++ ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of StoragePool resources. ++ Items []*StoragePool `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always compute#storagePoolList ++ // for lists of storagePools. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. end_interface: ++ // MixerListResponseWithEtagBuilder ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. 
++ Warning *StoragePoolListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Etag") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Etag") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolListWarning: [Output Only] Informational warning message. ++type StoragePoolListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. 
++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. 
"Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolResourceStatus: [Output Only] Contains output only fields. ++type StoragePoolResourceStatus struct { ++ // AggregateDiskProvisionedIops: [Output Only] Sum of all the disk' ++ // provisioned IOPS. ++ AggregateDiskProvisionedIops int64 `json:"aggregateDiskProvisionedIops,omitempty,string"` ++ ++ // AggregateDiskSizeGb: [Output Only] Sum of all the capacity ++ // provisioned in disks in this storage pool. A disk's provisioned ++ // capacity is the same as its total capacity. ++ AggregateDiskSizeGb int64 `json:"aggregateDiskSizeGb,omitempty,string"` ++ ++ // LastResizeTimestamp: [Output Only] Timestamp of the last successful ++ // resize in RFC3339 text format. ++ LastResizeTimestamp string `json:"lastResizeTimestamp,omitempty"` ++ ++ // MaxAggregateDiskSizeGb: [Output Only] Maximum allowed aggregate disk ++ // size in gigabytes. ++ MaxAggregateDiskSizeGb int64 `json:"maxAggregateDiskSizeGb,omitempty,string"` ++ ++ // NumberOfDisks: [Output Only] Number of disks used. ++ NumberOfDisks int64 `json:"numberOfDisks,omitempty,string"` ++ ++ // UsedBytes: [Output Only] Space used by data stored in disks within ++ // the storage pool (in bytes). ++ UsedBytes int64 `json:"usedBytes,omitempty,string"` ++ ++ // UsedReducedBytes: [Output Only] Space used by compressed and deduped ++ // data stored in disks within the storage pool (in bytes). ++ UsedReducedBytes int64 `json:"usedReducedBytes,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AggregateDiskProvisionedIops") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. ++ // "AggregateDiskProvisionedIops") to include in API requests with the ++ // JSON null value. By default, fields with empty values are omitted ++ // from API requests. However, any field with an empty value appearing ++ // in NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolsScopedList struct { ++ // StoragePools: [Output Only] A list of storage pool contained in this ++ // scope. ++ StoragePools []*StoragePool `json:"storagePools,omitempty"` ++ ++ // Warning: [Output Only] Informational warning which replaces the list ++ // of storage pool when the list is empty. ++ Warning *StoragePoolsScopedListWarning `json:"warning,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "StoragePools") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "StoragePools") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolsScopedListWarning: [Output Only] Informational warning ++// which replaces the list of storage pool when the list is empty. ++type StoragePoolsScopedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolsScopedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolsScopedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// Subnetwork: Represents a Subnetwork resource. A subnetwork (also ++// known as a subnet) is a logical partition of a Virtual Private Cloud ++// network with one primary IP range and zero or more secondary IP ++// ranges. For more information, read Virtual Private Cloud (VPC) ++// Network. ++type Subnetwork struct { ++ // AggregationInterval: Can only be specified if VPC flow logging for ++ // this subnetwork is enabled. Sets the aggregation interval for ++ // collecting flow logs. Increasing the interval time reduces the amount ++ // of generated flow logs for long-lasting connections. Default is an ++ // interval of 5 seconds per connection. Valid values: INTERVAL_5_SEC, ++ // INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, ++ // INTERVAL_15_MIN. ++ // ++ // Possible values: ++ // "INTERVAL_10_MIN" ++ // "INTERVAL_15_MIN" ++ // "INTERVAL_1_MIN" ++ // "INTERVAL_30_SEC" ++ // "INTERVAL_5_MIN" ++ // "INTERVAL_5_SEC" ++ AggregationInterval string `json:"aggregationInterval,omitempty"` ++ ++ // AllowSubnetCidrRoutesOverlap: Whether this subnetwork's ranges can ++ // conflict with existing static routes. Setting this to true allows ++ // this subnetwork's primary and secondary ranges to overlap with (and ++ // contain) static routes that have already been configured on the ++ // corresponding network. For example if a static route has range ++ // 10.1.0.0/16, a subnet range 10.0.0.0/8 could only be created if ++ // allow_conflicting_routes=true. Overlapping is only allowed on ++ // subnetwork operations; routes whose ranges conflict with this ++ // subnetwork's ranges won't be allowed unless ++ // route.allow_conflicting_subnetworks is set to true. 
Typically packets ++ // destined to IPs within the subnetwork (which may contain ++ // private/sensitive data) are prevented from leaving the virtual ++ // network. Setting this field to true will disable this feature. The ++ // default value is false and applies to all existing subnetworks and ++ // automatically created subnetworks. This field cannot be set to true ++ // at resource creation time. ++ AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: An optional description of this resource. Provide this ++ // property when you create the resource. This field can be set only at ++ // resource creation time. ++ Description string `json:"description,omitempty"` ++ ++ // EnableFlowLogs: Whether to enable flow logging for this subnetwork. ++ // If this field is not explicitly set, it will not appear in get ++ // listings. If not set the default behavior is determined by the org ++ // policy, if there is no org policy specified, then it will default to ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. ++ EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` ++ ++ // EnableL2: Enables Layer2 communication on the subnetwork. ++ EnableL2 bool `json:"enableL2,omitempty"` ++ ++ // EnablePrivateV6Access: Deprecated in favor of enable in ++ // PrivateIpv6GoogleAccess. Whether the VMs in this subnet can directly ++ // access Google services via internal IPv6 addresses. This field can be ++ // both set at resource creation time and updated using patch. ++ EnablePrivateV6Access bool `json:"enablePrivateV6Access,omitempty"` ++ ++ // ExternalIpv6Prefix: The external IPv6 address range that is owned by ++ // this subnetwork. ++ ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` ++ ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. This ++ // field will be ignored when inserting a Subnetwork. An up-to-date ++ // fingerprint must be provided in order to update the Subnetwork, ++ // otherwise the request will fail with error 412 conditionNotMet. To ++ // see the latest fingerprint, make a get() request to retrieve a ++ // Subnetwork. ++ Fingerprint string `json:"fingerprint,omitempty"` ++ ++ // FlowSampling: Can only be specified if VPC flow logging for this ++ // subnetwork is enabled. The value of the field must be in [0, 1]. Set ++ // the sampling rate of VPC flow logs within the subnetwork where 1.0 ++ // means all collected logs are reported and 0.0 means no logs are ++ // reported. Default is 0.5 unless otherwise specified by the org ++ // policy, which means half of all collected logs are reported. ++ FlowSampling float64 `json:"flowSampling,omitempty"` ++ ++ // GatewayAddress: [Output Only] The gateway address for default routes ++ // to reach destination addresses outside this subnetwork. ++ GatewayAddress string `json:"gatewayAddress,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // InternalIpv6Prefix: [Output Only] The internal IPv6 address range ++ // that is assigned to this subnetwork. 
++ InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` ++ ++ // IpCidrRange: The range of internal addresses that are owned by this ++ // subnetwork. Provide this property when you create the subnetwork. For ++ // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and ++ // non-overlapping within a network. Only IPv4 is supported. This field ++ // is set at resource creation time. The range can be any range listed ++ // in the Valid ranges list. The range can be expanded after creation ++ // using expandIpCidrRange. ++ IpCidrRange string `json:"ipCidrRange,omitempty"` ++ ++ // Ipv6AccessType: The access type of IPv6 address this subnet holds. ++ // It's immutable and can only be specified during creation or the first ++ // time the subnet is updated into IPV4_IPV6 dual stack. ++ // ++ // Possible values: ++ // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses ++ // that are accessible via the Internet, as well as the VPC network. ++ // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses ++ // that are only accessible over the VPC network. ++ Ipv6AccessType string `json:"ipv6AccessType,omitempty"` ++ ++ // Ipv6CidrRange: [Output Only] This field is for internal use. ++ Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` ++ ++ // Kind: [Output Only] Type of the resource. Always compute#subnetwork ++ // for Subnetwork resources. ++ Kind string `json:"kind,omitempty"` ++ ++ // LogConfig: This field denotes the VPC flow logging options for this ++ // subnetwork. If logging is enabled, logs are exported to Cloud ++ // Logging. ++ LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` ++ ++ // Metadata: Can only be specified if VPC flow logging for this ++ // subnetwork is enabled. Configures whether metadata fields should be ++ // added to the reported VPC flow logs. Options are ++ // INCLUDE_ALL_METADATA, EXCLUDE_ALL_METADATA, and CUSTOM_METADATA. ++ // Default is EXCLUDE_ALL_METADATA. ++ // ++ // Possible values: ++ // "EXCLUDE_ALL_METADATA" ++ // "INCLUDE_ALL_METADATA" ++ Metadata string `json:"metadata,omitempty"` ++ ++ // Name: The name of the resource, provided by the client when initially ++ // creating the resource. The name must be 1-63 characters long, and ++ // comply with RFC1035. Specifically, the name must be 1-63 characters ++ // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` ++ // which means the first character must be a lowercase letter, and all ++ // following characters must be a dash, lowercase letter, or digit, ++ // except the last character, which cannot be a dash. ++ Name string `json:"name,omitempty"` ++ ++ // Network: The URL of the network to which this subnetwork belongs, ++ // provided by the client when initially creating the subnetwork. This ++ // field can be set only at resource creation time. ++ Network string `json:"network,omitempty"` ++ ++ // PrivateIpGoogleAccess: Whether the VMs in this subnet can access ++ // Google services without assigned external IP addresses. This field ++ // can be both set at resource creation time and updated using ++ // setPrivateIpGoogleAccess. ++ PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` ++ ++ // PrivateIpv6GoogleAccess: This field is for internal use. This field ++ // can be both set at resource creation time and updated using patch. ++ // ++ // Possible values: ++ // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from ++ // Google services. 
++ // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private ++ // IPv6 access to/from Google services. ++ // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 ++ // access from VMs in this subnet to Google services. ++ PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` ++ ++ // Purpose: The purpose of the resource. This field can be either ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. ++ // ++ // Possible values: ++ // "AGGREGATE" - Subnetwork used to aggregate multiple private ++ // subnetworks. ++ // "CLOUD_EXTENSION" - Subnetworks created for Cloud Extension ++ // Machines. ++ // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal ++ // HTTP(S) Load Balancing. ++ // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal ++ // HTTP(S) Load Balancing. ++ // "PRIVATE" - Regular user created or automatically created subnet. ++ // "PRIVATE_NAT" - Subnetwork used as source range for Private NAT ++ // Gateways. ++ // "PRIVATE_RFC_1918" - Regular user created or automatically created ++ // subnet. ++ // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service ++ // Connect in the producer network. ++ // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional ++ // Internal/External HTTP(S) Load Balancing. ++ Purpose string `json:"purpose,omitempty"` ++ ++ // Region: URL of the region where the Subnetwork resides. This field ++ // can be set only at resource creation time. ++ Region string `json:"region,omitempty"` ++ ++ // ReservedInternalRange: The URL of the reserved internal range. ++ ReservedInternalRange string `json:"reservedInternalRange,omitempty"` ++ ++ // Role: The role of subnetwork. Currently, this field is only used when ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one ++ // that is ready to be promoted to ACTIVE or is currently draining. This ++ // field can be updated with a patch request. ++ // ++ // Possible values: ++ // "ACTIVE" - The ACTIVE subnet that is currently used. ++ // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. ++ Role string `json:"role,omitempty"` ++ ++ // SecondaryIpRanges: An array of configurations for secondary IP ranges ++ // for VM instances contained in this subnetwork. The primary IP of such ++ // VM must belong to the primary ipCidrRange of the subnetwork. The ++ // alias IPs may belong to either primary or secondary ranges. This ++ // field can be updated with a patch request. 
++ SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource ++ // with the resource id. ++ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` ++ ++ // StackType: The stack type for the subnet. If set to IPV4_ONLY, new ++ // VMs in the subnet are assigned IPv4 addresses only. If set to ++ // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 ++ // addresses. If not specified, IPV4_ONLY is used. This field can be ++ // both set at resource creation time and updated using patch. ++ // ++ // Possible values: ++ // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 ++ // addresses. ++ // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 ++ // addresses. ++ StackType string `json:"stackType,omitempty"` ++ ++ // State: [Output Only] The state of the subnetwork, which can be one of ++ // the following values: READY: Subnetwork is created and ready to use ++ // DRAINING: only applicable to subnetworks that have the purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the ++ // load balancer are being drained. A subnetwork that is draining cannot ++ // be used or modified until it reaches a status of READY ++ // ++ // Possible values: ++ // "DRAINING" - Subnetwork is being drained. ++ // "READY" - Subnetwork is ready for use. ++ State string `json:"state,omitempty"` ++ ++ // Vlans: A repeated field indicating the VLAN IDs supported on this ++ // subnetwork. During Subnet creation, specifying vlan is valid only if ++ // enable_l2 is true. During Subnet Update, specifying vlan is allowed ++ // only for l2 enabled subnets. Restricted to only one VLAN. ++ Vlans []int64 `json:"vlans,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "AggregationInterval") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AggregationInterval") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *Subnetwork) MarshalJSON() ([]byte, error) { ++ type NoMethod Subnetwork ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++func (s *Subnetwork) UnmarshalJSON(data []byte) error { ++ type NoMethod Subnetwork ++ var s1 struct { ++ FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` ++ *NoMethod ++ } ++ s1.NoMethod = (*NoMethod)(s) ++ if err := json.Unmarshal(data, &s1); err != nil { ++ return err ++ } ++ s.FlowSampling = float64(s1.FlowSampling) ++ return nil ++} ++ ++type SubnetworkAggregatedList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of SubnetworksScopedList resources. ++ Items map[string]SubnetworksScopedList `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#subnetworkAggregatedList for aggregated lists of subnetworks. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod SubnetworkAggregatedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// SubnetworkAggregatedListWarning: [Output Only] Informational warning ++// message. ++type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. +@@ -60563,6 +62070,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. 
If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -61575,6 +63084,15 @@ type TargetHttpProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -62201,7 +63719,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -62324,7 +63844,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -62359,6 +63881,15 @@ type TargetHttpsProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -62421,9 +63952,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. 
serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -64425,7 +65958,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -64523,7 +66058,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -67662,12 +69199,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. 
+ // + // Possible values: + // "AGGREGATE" - Subnetwork used to aggregate multiple private +@@ -67690,9 +69235,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -68428,6 +69973,7 @@ type VpnGateway struct { + // Possible values: + // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. + // "IPV4_ONLY" - Enable VPN gateway with only IPv4 protocol. ++ // "IPV6_ONLY" - Enable VPN gateway with only IPv6 protocol. + StackType string `json:"stackType,omitempty"` + + // VpnInterfaces: The list of VPN interfaces associated with this VPN +@@ -68931,7 +70477,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -68964,8 +70510,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -93130,9 +94676,7 @@ func (r *GlobalAddressesService) GetOwnerInstance(project string) *GlobalAddress + return c + } + +-// IpAddress sets the optional parameter "ipAddress": The ip_address +-// could be external IPv4, or internal IPv4 within IPv6 form of +-// virtual_network_id with internal IPv4. IPv6 is not supported yet. ++// IpAddress sets the optional parameter "ipAddress": The VM IP address. + func (c *GlobalAddressesGetOwnerInstanceCall) IpAddress(ipAddress string) *GlobalAddressesGetOwnerInstanceCall { + c.urlParams_.Set("ipAddress", ipAddress) + return c +@@ -93246,7 +94790,7 @@ func (c *GlobalAddressesGetOwnerInstanceCall) Do(opts ...googleapi.CallOption) ( + // ], + // "parameters": { + // "ipAddress": { +- // "description": "The ip_address could be external IPv4, or internal IPv4 within IPv6 form of virtual_network_id with internal IPv4. IPv6 is not supported yet.", ++ // "description": "The VM IP address.", + // "location": "query", + // "type": "string" + // }, +@@ -111205,6 +112749,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use instanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -123543,32 +125088,33 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.instances.setServiceAccount": ++// method id "compute.instances.setSecurityPolicy": + +-type InstancesSetServiceAccountCall struct { ++type InstancesSetSecurityPolicyCall struct { + s *Service + project string + zone string + instance string +- instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetServiceAccount: Sets the service account on the instance. For more +-// information, read Changing the service account and access scopes for +-// an instance. ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified instance. For more information, see Google Cloud Armor ++// Overview + // +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { +- c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instance: Name of the Instance resource to which the security ++// policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - zone: Name of the zone scoping this request. ++func (r *InstancesService) SetSecurityPolicy(project string, zone string, instance string, instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest) *InstancesSetSecurityPolicyCall { ++ c := &InstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancessetserviceaccountrequest = instancessetserviceaccountrequest ++ c.instancessetsecuritypolicyrequest = instancessetsecuritypolicyrequest + return c + } + +@@ -123583,7 +125129,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) RequestId(requestId string) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123591,7 +125137,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123599,21 +125145,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Context(ctx context.Context) *InstancesSetSecurityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetServiceAccountCall) Header() http.Header { ++func (c *InstancesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123621,14 +125167,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetsecuritypolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123643,14 +125189,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setServiceAccount" call. ++// Do executes the "compute.instances.setSecurityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123681,10 +125227,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + } + return ret, nil + // { +- // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "httpMethod": "POST", +- // "id": "compute.instances.setServiceAccount", ++ // "id": "compute.instances.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -123692,9 +125238,8 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -123711,16 +125256,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "request": { +- // "$ref": "InstancesSetServiceAccountRequest" ++ // "$ref": "InstancesSetSecurityPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -123733,33 +125278,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.instances.setShieldedInstanceIntegrityPolicy": ++// method id "compute.instances.setServiceAccount": + +-type InstancesSetShieldedInstanceIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetServiceAccountCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +-// integrity policy for an instance. You can only use this method on a +-// running instance. This method supports PATCH semantics and uses the +-// JSON merge patch format and processing rules. ++// SetServiceAccount: Sets the service account on the instance. For more ++// information, read Changing the service account and access scopes for ++// an instance. + // +-// - instance: Name or id of the instance scoping this request. ++// - instance: Name of the instance resource to start. 
+ // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { +- c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { ++ c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy ++ c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c + } + +@@ -123774,7 +125318,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123782,7 +125326,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123790,21 +125334,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetServiceAccountCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123812,16 +125356,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -123834,14 +125378,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. ++// Do executes the "compute.instances.setServiceAccount" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123872,10 +125416,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.setServiceAccount", + // "parameterOrder": [ + // "project", + // "zone", +@@ -123883,7 +125427,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // ], + // "parameters": { + // "instance": { +- // "description": "Name or id of the instance scoping this request.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -123909,9 +125453,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { +- // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // "$ref": "InstancesSetServiceAccountRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -123924,33 +125468,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + + } + +-// method id "compute.instances.setShieldedVmIntegrityPolicy": ++// method id "compute.instances.setShieldedInstanceIntegrityPolicy": + +-type InstancesSetShieldedVmIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedInstanceIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +-// for a VM instance. You can only use this method on a running VM +-// instance. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance ++// integrity policy for an instance. You can only use this method on a ++// running instance. This method supports PATCH semantics and uses the ++// JSON merge patch format and processing rules. + // +-// - instance: Name of the instance scoping this request. ++// - instance: Name or id of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { +- c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++ c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c + } + +@@ -123965,7 +125509,7 @@ func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123973,7 +125517,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123981,21 +125525,21 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124003,14 +125547,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -124025,14 +125569,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124063,10 +125607,10 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedVmIntegrityPolicy", ++ // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124074,7 +125618,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance scoping this request.", ++ // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -124100,9 +125644,9 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "request": { +- // "$ref": "ShieldedVmIntegrityPolicy" ++ // "$ref": "ShieldedInstanceIntegrityPolicy" + // }, + // "response": { + // "$ref": "Operation" +@@ -124115,31 +125659,33 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + + } + +-// method id "compute.instances.setTags": ++// method id "compute.instances.setShieldedVmIntegrityPolicy": + +-type InstancesSetTagsCall struct { +- s *Service +- project string +- zone string +- instance string +- tags *Tags +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedVmIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetTags: Sets network tags for the specified instance to the data +-// included in the request. ++// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy ++// for a VM instance. You can only use this method on a running VM ++// instance. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. + // + // - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { +- c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.tags = tags ++ c.shieldedvmintegritypolicy = shieldedvmintegritypolicy + return c + } + +@@ -124154,7 +125700,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124162,7 +125708,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124170,21 +125716,21 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetTagsCall) Header() http.Header { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124192,16 +125738,16 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } +@@ -124214,14 +125760,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setTags" call. ++// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124252,10 +125798,10 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + } + return ret, nil + // { +- // "description": "Sets network tags for the specified instance to the data included in the request.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", +- // "httpMethod": "POST", +- // "id": "compute.instances.setTags", ++ // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "httpMethod": "PATCH", ++ // "id": "compute.instances.setShieldedVmIntegrityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124289,9 +125835,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", + // "request": { +- // "$ref": "Tags" ++ // "$ref": "ShieldedVmIntegrityPolicy" + // }, + // "response": { + // "$ref": "Operation" +@@ -124304,29 +125850,31 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + + } + +-// method id "compute.instances.simulateMaintenanceEvent": ++// method id "compute.instances.setTags": + +-type InstancesSimulateMaintenanceEventCall struct { ++type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string ++ tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. +-// For more information, see Simulate a host maintenance event. ++// SetTags: Sets network tags for the specified instance to the data ++// included in the request. + // + // - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { +- c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { ++ c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance ++ c.tags = tags + return c + } + +@@ -124341,23 +125889,15 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// WithExtendedNotifications sets the optional parameter +-// "withExtendedNotifications": Determines whether the customers receive +-// notifications before migration. Only applicable to SF vms. +-func (c *InstancesSimulateMaintenanceEventCall) WithExtendedNotifications(withExtendedNotifications bool) *InstancesSimulateMaintenanceEventCall { +- c.urlParams_.Set("withExtendedNotifications", fmt.Sprint(withExtendedNotifications)) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124365,21 +125905,21 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { ++func (c *InstancesSetTagsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124387,9 +125927,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124404,14 +125949,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.simulateMaintenanceEvent" call. ++// Do executes the "compute.instances.setTags" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. 
Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124442,10 +125987,10 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "description": "Sets network tags for the specified instance to the data included in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "httpMethod": "POST", +- // "id": "compute.instances.simulateMaintenanceEvent", ++ // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124471,11 +126016,6 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "withExtendedNotifications": { +- // "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -124484,7 +126024,10 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", ++ // "request": { ++ // "$ref": "Tags" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -124496,9 +126039,9 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.instances.start": ++// method id "compute.instances.simulateMaintenanceEvent": + +-type InstancesStartCall struct { ++type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string +@@ -124508,14 +126051,14 @@ type InstancesStartCall struct { + header_ http.Header + } + +-// Start: Starts an instance that was stopped using the instances().stop +-// method. For more information, see Restart an instance. ++// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. ++// For more information, see Simulate a host maintenance event. + // +-// - instance: Name of the instance resource to start. ++// - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { +- c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { ++ c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +@@ -124533,15 +126076,23 @@ func (r *InstancesService) Start(project string, zone string, instance string) * + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// WithExtendedNotifications sets the optional parameter ++// "withExtendedNotifications": Determines whether the customers receive ++// notifications before migration. Only applicable to SF vms. ++func (c *InstancesSimulateMaintenanceEventCall) WithExtendedNotifications(withExtendedNotifications bool) *InstancesSimulateMaintenanceEventCall { ++ c.urlParams_.Set("withExtendedNotifications", fmt.Sprint(withExtendedNotifications)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124549,21 +126100,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesStartCall) Header() http.Header { ++func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124573,7 +126124,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124588,14 +126139,14 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.start" call. ++// Do executes the "compute.instances.simulateMaintenanceEvent" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124626,10 +126177,10 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", ++ // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "httpMethod": "POST", +- // "id": "compute.instances.start", ++ // "id": "compute.instances.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124637,7 +126188,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -124655,6 +126206,11 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "location": "query", + // "type": "string" + // }, ++ // "withExtendedNotifications": { ++ // "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -124663,7 +126219,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "response": { + // "$ref": "Operation" + // }, +@@ -124675,32 +126231,29 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.instances.startWithEncryptionKey": ++// method id "compute.instances.start": + +-type InstancesStartWithEncryptionKeyCall struct { +- s *Service +- project string +- zone string +- instance string +- instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesStartCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// StartWithEncryptionKey: Starts an instance that was stopped using the +-// instances().stop method. For more information, see Restart an +-// instance. ++// Start: Starts an instance that was stopped using the instances().stop ++// method. For more information, see Restart an instance. + // + // - instance: Name of the instance resource to start. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { +- c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { ++ c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c + } + +@@ -124715,7 +126268,7 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124723,7 +126276,7 @@ func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *Insta + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124731,21 +126284,21 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { ++func (c *InstancesStartCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124753,14 +126306,9 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124775,14 +126323,14 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.startWithEncryptionKey" call. ++// Do executes the "compute.instances.start" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124814,9 +126362,9 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + return ret, nil + // { + // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "httpMethod": "POST", +- // "id": "compute.instances.startWithEncryptionKey", ++ // "id": "compute.instances.start", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124850,10 +126398,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", +- // "request": { +- // "$ref": "InstancesStartWithEncryptionKeyRequest" +- // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "response": { + // "$ref": "Operation" + // }, +@@ -124865,41 +126410,32 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.instances.stop": ++// method id "compute.instances.startWithEncryptionKey": + +-type InstancesStopCall struct { +- s *Service +- project string +- zone string +- instance string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesStartWithEncryptionKeyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Stop: Stops a running instance, shutting it down cleanly, and allows +-// you to restart the instance at a later time. Stopped instances do not +-// incur VM usage charges while they are stopped. However, resources +-// that the VM is using, such as persistent disks and static IP +-// addresses, will continue to be charged until they are deleted. For +-// more information, see Stopping an instance. ++// StartWithEncryptionKey: Starts an instance that was stopped using the ++// instances().stop method. For more information, see Restart an ++// instance. + // +-// - instance: Name of the instance resource to stop. ++// - instance: Name of the instance resource to start. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { +- c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { ++ c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- return c +-} +- +-// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +-// true, discard the contents of any attached localSSD partitions. +-// Default value is false. +-func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { +- c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) ++ c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c + } + +@@ -124914,7 +126450,7 @@ func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStop + // clients from accidentally creating duplicate commitments. 
The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124922,7 +126458,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124930,21 +126466,21 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesStopCall) Header() http.Header { ++func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124952,9 +126488,14 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124969,14 +126510,14 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.stop" call. ++// Do executes the "compute.instances.startWithEncryptionKey" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -125007,23 +126548,18 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + } + return ret, nil + // { +- // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "httpMethod": "POST", +- // "id": "compute.instances.stop", ++ // "id": "compute.instances.startWithEncryptionKey", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { +- // "discardLocalSsd": { +- // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "instance": { +- // "description": "Name of the instance resource to stop.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -125049,7 +126585,10 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", ++ // "request": { ++ // "$ref": "InstancesStartWithEncryptionKeyRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -125061,9 +126600,9 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + + } + +-// method id "compute.instances.suspend": ++// method id "compute.instances.stop": + +-type InstancesSuspendCall struct { ++type InstancesStopCall struct { + s *Service + project string + zone string +@@ -125073,20 +126612,18 @@ type InstancesSuspendCall struct { + header_ http.Header + } + +-// Suspend: This method suspends a running instance, saving its state to +-// persistent storage, and allows you to resume the instance at a later +-// time. Suspended instances have no compute costs (cores or RAM), and +-// incur only storage charges for the saved VM memory and localSSD data. +-// Any charged resources the virtual machine was using, such as +-// persistent disks and static IP addresses, will continue to be charged +-// while the instance is suspended. For more information, see Suspending +-// and resuming an instance. ++// Stop: Stops a running instance, shutting it down cleanly, and allows ++// you to restart the instance at a later time. Stopped instances do not ++// incur VM usage charges while they are stopped. 
However, resources ++// that the VM is using, such as persistent disks and static IP ++// addresses, will continue to be charged until they are deleted. For ++// more information, see Stopping an instance. + // +-// - instance: Name of the instance resource to suspend. ++// - instance: Name of the instance resource to stop. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { +- c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { ++ c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +@@ -125096,7 +126633,7 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) + // DiscardLocalSsd sets the optional parameter "discardLocalSsd": If + // true, discard the contents of any attached localSSD partitions. + // Default value is false. +-func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { ++func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c + } +@@ -125112,7 +126649,7 @@ func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesS + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { ++func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -125120,7 +126657,7 @@ func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { ++func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -125128,21 +126665,21 @@ func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCal + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { ++func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSuspendCall) Header() http.Header { ++func (c *InstancesStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -125152,7 +126689,7 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -125167,14 +126704,14 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.suspend" call. ++// Do executes the "compute.instances.stop" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -125205,10 +126742,10 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err + } + return ret, nil + // { +- // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", ++ // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", + // "httpMethod": "POST", +- // "id": "compute.instances.suspend", ++ // "id": "compute.instances.stop", + // "parameterOrder": [ + // "project", + // "zone", +@@ -125221,7 +126758,205 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err + // "type": "boolean" + // }, + // "instance": { +- // "description": "Name of the instance resource to suspend.", ++ // "description": "Name of the instance resource to stop.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.suspend": ++ ++type InstancesSuspendCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Suspend: This method suspends a running instance, saving its state to ++// persistent storage, and allows you to resume the instance at a later ++// time. Suspended instances have no compute costs (cores or RAM), and ++// incur only storage charges for the saved VM memory and localSSD data. ++// Any charged resources the virtual machine was using, such as ++// persistent disks and static IP addresses, will continue to be charged ++// while the instance is suspended. For more information, see Suspending ++// and resuming an instance. ++// ++// - instance: Name of the instance resource to suspend. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { ++ c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ return c ++} ++ ++// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If ++// true, discard the contents of any attached localSSD partitions. ++// Default value is false. ++func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { ++ c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesSuspendCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.suspend" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.suspend", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "discardLocalSsd": { ++ // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "instance": { ++ // "description": "Name of the instance resource to suspend.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -139125,38 +140860,56 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA + } + } + +-// method id "compute.networkAttachments.setIamPolicy": ++// method id "compute.networkAttachments.patch": + +-type NetworkAttachmentsSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsPatchCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ networkattachment *NetworkAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. ++// Patch: Patches the specified NetworkAttachment resource with the data ++// included in the request. This method supports PATCH semantics and ++// uses JSON merge patch format and processing rules. + // ++// - networkAttachment: Name of the NetworkAttachment resource to patch. + // - project: Project ID for this request. +-// - region: The name of the region for this request. 
+-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { +- c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. ++func (r *NetworkAttachmentsService) Patch(project string, region string, networkAttachment string, networkattachment *NetworkAttachment) *NetworkAttachmentsPatchCall { ++ c := &NetworkAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest ++ c.networkAttachment = networkAttachment ++ c.networkattachment = networkattachment ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsPatchCall) RequestId(requestId string) *NetworkAttachmentsPatchCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { ++func (c *NetworkAttachmentsPatchCall) Fields(s ...googleapi.Field) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -139164,21 +140917,21 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *Netwo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { ++func (c *NetworkAttachmentsPatchCall) Context(ctx context.Context) *NetworkAttachmentsPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { ++func (c *NetworkAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -139186,36 +140939,36 @@ func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Respon + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.networkAttachments.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -139234,7 +140987,7 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -139246,16 +140999,23 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.setIamPolicy", ++ // "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.networkAttachments.patch", + // "parameterOrder": [ + // "project", + // "region", +- // "resource" ++ // "networkAttachment" + // ], + // "parameters": { ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to patch.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -139264,26 +141024,24 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "request": { +- // "$ref": "RegionSetPolicyRequest" ++ // "$ref": "NetworkAttachment" + // }, + // "response": { +- // "$ref": "Policy" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -139293,38 +141051,38 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkAttachments.testIamPermissions": ++// method id "compute.networkAttachments.setIamPolicy": + +-type NetworkAttachmentsTestIamPermissionsCall struct { ++type NetworkAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string +- testpermissionsrequest *TestPermissionsRequest ++ regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { +- c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { ++ c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.regionsetpolicyrequest = regionsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -139332,21 +141090,21 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { ++func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -139354,14 +141112,14 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http. + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -139376,14 +141134,14 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http. + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.networkAttachments.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -139402,7 +141160,7 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -139414,10 +141172,178 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.networkAttachments.testIamPermissions", ++ // "id": "compute.networkAttachments.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkAttachments.testIamPermissions": ++ ++type NetworkAttachmentsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { ++ c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkAttachments.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", +@@ -167075,83 +169001,86 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( + } + } + +-// method id "compute.regionCommitments.get": ++// method id "compute.regionCommitments.calculateCancellationFee": + +-type RegionCommitmentsGetCall struct { +- s *Service +- project string +- region string +- commitment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionCommitmentsCalculateCancellationFeeCall struct { ++ s *Service ++ project string ++ region string ++ commitment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified commitment resource. ++// CalculateCancellationFee: Calculate cancellation fee for the ++// specified commitment. + // +-// - commitment: Name of the commitment to return. ++// - commitment: Name of the commitment to delete. + // - project: Project ID for this request. + // - region: Name of the region for this request. +-func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { +- c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionCommitmentsService) CalculateCancellationFee(project string, region string, commitment string) *RegionCommitmentsCalculateCancellationFeeCall { ++ c := &RegionCommitmentsCalculateCancellationFeeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.commitment = commitment + return c + } + ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *RegionCommitmentsCalculateCancellationFeeCall) RequestId(requestId string) *RegionCommitmentsCalculateCancellationFeeCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Fields(s ...googleapi.Field) *RegionCommitmentsCalculateCancellationFeeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Context(ctx context.Context) *RegionCommitmentsCalculateCancellationFeeCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionCommitmentsGetCall) Header() http.Header { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -167164,14 +169093,14 @@ func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionCommitments.get" call. +-// Exactly one of *Commitment or error will be non-nil. Any non-2xx ++// Do executes the "compute.regionCommitments.calculateCancellationFee" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. 
Response headers are in either +-// *Commitment.ServerResponse.Header or (if a response was returned at ++// *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -167190,7 +169119,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Commitment{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -167202,10 +169131,10 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + } + return ret, nil + // { +- // "description": "Returns the specified commitment resource.", +- // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", +- // "httpMethod": "GET", +- // "id": "compute.regionCommitments.get", ++ // "description": "Calculate cancellation fee for the specified commitment.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ // "httpMethod": "POST", ++ // "id": "compute.regionCommitments.calculateCancellationFee", + // "parameterOrder": [ + // "project", + // "region", +@@ -167213,7 +169142,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + // ], + // "parameters": { + // "commitment": { +- // "description": "Name of the commitment to return.", ++ // "description": "Name of the commitment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -167232,40 +169161,44 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", + // "response": { +- // "$ref": "Commitment" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.regionCommitments.insert": ++// method id "compute.regionCommitments.cancel": + +-type RegionCommitmentsInsertCall struct { ++type RegionCommitmentsCancelCall struct { + s *Service + project string + region string +- commitment *Commitment ++ commitment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Insert: Creates a commitment in the specified project using the data +-// included in the request. ++// Cancel: Cancel the specified commitment. + // ++// - commitment: Name of the commitment to delete. + // - project: Project ID for this request. + // - region: Name of the region for this request. +-func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { +- c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionCommitmentsService) Cancel(project string, region string, commitment string) *RegionCommitmentsCancelCall { ++ c := &RegionCommitmentsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.commitment = commitment +@@ -167283,7 +169216,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) RequestId(requestId string) *RegionCommitmentsCancelCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -167291,7 +169224,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) Fields(s ...googleapi.Field) *RegionCommitmentsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -167299,21 +169232,21 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) Context(ctx context.Context) *RegionCommitmentsCancelCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionCommitmentsInsertCall) Header() http.Header { ++func (c *RegionCommitmentsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionCommitmentsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -167321,14 +169254,9 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}/cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -167336,20 +169264,375 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "commitment": c.commitment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionCommitments.insert" call. ++// Do executes the "compute.regionCommitments.cancel" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionCommitmentsCancelCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Cancel the specified commitment.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ // "httpMethod": "POST", ++ // "id": "compute.regionCommitments.cancel", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "commitment" ++ // ], ++ // "parameters": { ++ // "commitment": { ++ // "description": "Name of the commitment to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionCommitments.get": ++ ++type RegionCommitmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ commitment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the specified commitment resource. ++// ++// - commitment: Name of the commitment to return. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. 
++func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { ++ c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.commitment = commitment ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionCommitmentsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "commitment": c.commitment, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionCommitments.get" call. ++// Exactly one of *Commitment or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Commitment.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Commitment{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the specified commitment resource.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "httpMethod": "GET", ++ // "id": "compute.regionCommitments.get", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "commitment" ++ // ], ++ // "parameters": { ++ // "commitment": { ++ // "description": "Name of the commitment to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "response": { ++ // "$ref": "Commitment" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionCommitments.insert": ++ ++type RegionCommitmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ commitment *Commitment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a commitment in the specified project using the data ++// included in the request. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { ++ c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.commitment = commitment ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. 
The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionCommitmentsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionCommitments.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -178821,6 +181104,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use regionInstanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -188099,6 +190383,195 @@ func (c *RegionNetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption + + } + ++// method id "compute.regionNetworkFirewallPolicies.patchAssociation": ++ ++type RegionNetworkFirewallPoliciesPatchAssociationCall struct { ++ s *Service ++ project string ++ region string ++ firewallPolicy string ++ firewallpolicyassociation *FirewallPolicyAssociation ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchAssociation: Updates an association for the specified network ++// firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionNetworkFirewallPoliciesService) PatchAssociation(project string, region string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c := &RegionNetworkFirewallPoliciesPatchAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyassociation = firewallpolicyassociation ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionNetworkFirewallPolicies.patchAssociation" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates an association for the specified network firewall policy.", ++ // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ // "httpMethod": "POST", ++ // "id": "compute.regionNetworkFirewallPolicies.patchAssociation", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ // "request": { ++ // "$ref": "FirewallPolicyAssociation" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionNetworkFirewallPolicies.patchRule": + + type RegionNetworkFirewallPoliciesPatchRuleCall struct { +@@ -205701,6 +208174,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { + + } + ++// method id "compute.routers.getNatIpInfo": ++ ++type RoutersGetNatIpInfoCall struct { ++ s *Service ++ project string ++ region string ++ router string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetNatIpInfo: Retrieves runtime NAT IP information. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++// - router: Name of the Router resource to query for Nat IP ++// information. The name should conform to RFC1035. ++func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall { ++ c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.router = router ++ return c ++} ++ ++// NatName sets the optional parameter "natName": Name of the nat ++// service to filter the NAT IP information. If it is omitted, all nats ++// for this router will be returned. Name should conform to RFC1035. ++func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall { ++ c.urlParams_.Set("natName", natName) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RoutersGetNatIpInfoCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "router": c.router, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.routers.getNatIpInfo" call. ++// Exactly one of *NatIpInfoResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NatIpInfoResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NatIpInfoResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves runtime NAT IP information.", ++ // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ // "httpMethod": "GET", ++ // "id": "compute.routers.getNatIpInfo", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "router" ++ // ], ++ // "parameters": { ++ // "natName": { ++ // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. 
Name should conform to RFC1035.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "router": { ++ // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ // "response": { ++ // "$ref": "NatIpInfoResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.routers.getNatMappingInfo": + + type RoutersGetNatMappingInfoCall struct { +@@ -212201,24 +214859,6 @@ func (r *ServiceAttachmentsService) Patch(project string, region string, service + return c + } + +-// ReconcileConnections sets the optional parameter +-// "reconcileConnections": This flag determines how to change the status +-// of consumer connections, when the connection policy for the +-// corresponding project or network is modified. If the flag is false, +-// the default case, then existing ACCEPTED and REJECTED consumer +-// connections stay in that state. For example, even if the project is +-// removed from the accept list, existing ACCEPTED connections will stay +-// the same. If the flag is true, then the connection can change from +-// ACCEPTED or REJECTED to pending when the connection policy is +-// modified. For example, if a project is removed from the reject list, +-// its existing REJECTED connections will move to the PENDING state. If +-// the project is also added to the accept list, then those connections +-// will move to the ACCEPTED state. +-func (c *ServiceAttachmentsPatchCall) ReconcileConnections(reconcileConnections bool) *ServiceAttachmentsPatchCall { +- c.urlParams_.Set("reconcileConnections", fmt.Sprint(reconcileConnections)) +- return c +-} +- + // RequestId sets the optional parameter "requestId": An optional + // request ID to identify requests. Specify a unique request ID so that + // if you must retry your request, the server will know to ignore the +@@ -212345,11 +214985,6 @@ func (c *ServiceAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operati + // "required": true, + // "type": "string" + // }, +- // "reconcileConnections": { +- // "description": "This flag determines how to change the status of consumer connections, when the connection policy for the corresponding project or network is modified. If the flag is false, the default case, then existing ACCEPTED and REJECTED consumer connections stay in that state. For example, even if the project is removed from the accept list, existing ACCEPTED connections will stay the same. If the flag is true, then the connection can change from ACCEPTED or REJECTED to pending when the connection policy is modified. 
For example, if a project is removed from the reject list, its existing REJECTED connections will move to the PENDING state. If the project is also added to the accept list, then those connections will move to the ACCEPTED state.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "region": { + // "description": "The region scoping this request and should conform to RFC1035.", + // "location": "path", +@@ -217034,6 +219669,2041 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T + + } + ++// method id "compute.storagePools.aggregatedList": ++ ++type StoragePoolsAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AggregatedList: Retrieves an aggregated list of storage pools. ++// ++// - project: Project ID for this request. ++func (r *StoragePoolsService) AggregatedList(project string) *StoragePoolsAggregatedListCall { ++ c := &StoragePoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. 
For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *StoragePoolsAggregatedListCall) Filter(filter string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *StoragePoolsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *StoragePoolsAggregatedListCall) MaxResults(maxResults int64) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *StoragePoolsAggregatedListCall) OrderBy(orderBy string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *StoragePoolsAggregatedListCall) PageToken(pageToken string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *StoragePoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *StoragePoolsAggregatedListCall) Fields(s ...googleapi.Field) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsAggregatedListCall) IfNoneMatch(entityTag string) *StoragePoolsAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsAggregatedListCall) Context(ctx context.Context) *StoragePoolsAggregatedListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsAggregatedListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.aggregatedList" call. ++// Exactly one of *StoragePoolAggregatedList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *StoragePoolAggregatedList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*StoragePoolAggregatedList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePoolAggregatedList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves an aggregated list of storage pools.", ++ // "flatPath": "projects/{project}/aggregated/storagePools", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.aggregatedList", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. 
For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/aggregated/storagePools", ++ // "response": { ++ // "$ref": "StoragePoolAggregatedList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *StoragePoolsAggregatedListCall) Pages(ctx context.Context, f func(*StoragePoolAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.storagePools.delete": ++ ++type StoragePoolsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Delete: Deletes the specified storage pool. 
Deleting a storagePool ++// removes its data permanently and is irreversible. However, deleting a ++// storagePool does not delete any snapshots previously made from the ++// storagePool. You must separately delete snapshots. ++// ++// - project: Project ID for this request. ++// - storagePool: Name of the storage pool to delete. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Delete(project string, zone string, storagePool string) *StoragePoolsDeleteCall { ++ c := &StoragePoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsDeleteCall) RequestId(requestId string) *StoragePoolsDeleteCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsDeleteCall) Fields(s ...googleapi.Field) *StoragePoolsDeleteCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsDeleteCall) Context(ctx context.Context) *StoragePoolsDeleteCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsDeleteCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("DELETE", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes the specified storage pool. Deleting a storagePool removes its data permanently and is irreversible. However, deleting a storagePool does not delete any snapshots previously made from the storagePool. You must separately delete snapshots.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.storagePools.delete", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "Name of the storage pool to delete.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.get": ++ ++type StoragePoolsGetCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns a specified storage pool. Gets a list of available ++// storage pools by making a list() request. ++// ++// - project: Project ID for this request. ++// - storagePool: Name of the storage pool to return. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Get(project string, zone string, storagePool string) *StoragePoolsGetCall { ++ c := &StoragePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsGetCall) Fields(s ...googleapi.Field) *StoragePoolsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsGetCall) IfNoneMatch(entityTag string) *StoragePoolsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsGetCall) Context(ctx context.Context) *StoragePoolsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *StoragePoolsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.get" call. ++// Exactly one of *StoragePool or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *StoragePool.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsGetCall) Do(opts ...googleapi.CallOption) (*StoragePool, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePool{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns a specified storage pool. 
Gets a list of available storage pools by making a list() request.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.get", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "Name of the storage pool to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "response": { ++ // "$ref": "StoragePool" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.getIamPolicy": ++ ++type StoragePoolsGetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) GetIamPolicy(project string, zone string, resource string) *StoragePoolsGetIamPolicyCall { ++ c := &StoragePoolsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *StoragePoolsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *StoragePoolsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsGetIamPolicyCall) Fields(s ...googleapi.Field) *StoragePoolsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. 
++func (c *StoragePoolsGetIamPolicyCall) IfNoneMatch(entityTag string) *StoragePoolsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsGetIamPolicyCall) Context(ctx context.Context) *StoragePoolsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *StoragePoolsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.insert": ++ ++type StoragePoolsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ storagepool *StoragePool ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a storage pool in the specified project using the ++// data in the request. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Insert(project string, zone string, storagepool *StoragePool) *StoragePoolsInsertCall { ++ c := &StoragePoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagepool = storagepool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsInsertCall) RequestId(requestId string) *StoragePoolsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *StoragePoolsInsertCall) Fields(s ...googleapi.Field) *StoragePoolsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsInsertCall) Context(ctx context.Context) *StoragePoolsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.storagepool) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Creates a storage pool in the specified project using the data in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.insert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools", ++ // "request": { ++ // "$ref": "StoragePool" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.list": ++ ++type StoragePoolsListCall struct { ++ s *Service ++ project string ++ zone string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves a list of storage pools contained within the ++// specified zone. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) List(project string, zone string) *StoragePoolsListCall { ++ c := &StoragePoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. 
Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *StoragePoolsListCall) Filter(filter string) *StoragePoolsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *StoragePoolsListCall) MaxResults(maxResults int64) *StoragePoolsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). 
Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *StoragePoolsListCall) OrderBy(orderBy string) *StoragePoolsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *StoragePoolsListCall) PageToken(pageToken string) *StoragePoolsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *StoragePoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *StoragePoolsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsListCall) Fields(s ...googleapi.Field) *StoragePoolsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsListCall) IfNoneMatch(entityTag string) *StoragePoolsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsListCall) Context(ctx context.Context) *StoragePoolsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.list" call. ++// Exactly one of *StoragePoolList or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *StoragePoolList.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsListCall) Do(opts ...googleapi.CallOption) (*StoragePoolList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePoolList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves a list of storage pools contained within the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.list", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools", ++ // "response": { ++ // "$ref": "StoragePoolList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *StoragePoolsListCall) Pages(ctx context.Context, f func(*StoragePoolList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.storagePools.setIamPolicy": ++ ++type StoragePoolsSetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetpolicyrequest *ZoneSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *StoragePoolsSetIamPolicyCall { ++ c := &StoragePoolsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.zonesetpolicyrequest = zonesetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsSetIamPolicyCall) Fields(s ...googleapi.Field) *StoragePoolsSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsSetIamPolicyCall) Context(ctx context.Context) *StoragePoolsSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. 
Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *StoragePoolsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "ZoneSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.setLabels": ++ ++type StoragePoolsSetLabelsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetlabelsrequest *ZoneSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetLabels: Sets the labels on a storage pools. To learn more about ++// labels, read the Labeling Resources documentation. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. 
++func (r *StoragePoolsService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *StoragePoolsSetLabelsCall { ++ c := &StoragePoolsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.zonesetlabelsrequest = zonesetlabelsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsSetLabelsCall) RequestId(requestId string) *StoragePoolsSetLabelsCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsSetLabelsCall) Fields(s ...googleapi.Field) *StoragePoolsSetLabelsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsSetLabelsCall) Context(ctx context.Context) *StoragePoolsSetLabelsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsSetLabelsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.setLabels" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the labels on a storage pools. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.setLabels", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ // "request": { ++ // "$ref": "ZoneSetLabelsRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.testIamPermissions": ++ ++type StoragePoolsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *StoragePoolsTestIamPermissionsCall { ++ c := &StoragePoolsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsTestIamPermissionsCall) Fields(s ...googleapi.Field) *StoragePoolsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsTestIamPermissionsCall) Context(ctx context.Context) *StoragePoolsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *StoragePoolsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.update": ++ ++type StoragePoolsUpdateCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ storagepool *StoragePool ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Update: Updates the specified storagePool with the data included in ++// the request. The update is performed only on selected fields included ++// as part of update-mask. Only the following fields can be modified: ++// size_tb and provisioned_iops. ++// ++// - project: Project ID for this request. ++// - storagePool: The storagePool name for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Update(project string, zone string, storagePool string, storagepool *StoragePool) *StoragePoolsUpdateCall { ++ c := &StoragePoolsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ c.storagepool = storagepool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. 
Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsUpdateCall) RequestId(requestId string) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// UpdateMask sets the optional parameter "updateMask": update_mask ++// indicates fields to be updated as part of this request. ++func (c *StoragePoolsUpdateCall) UpdateMask(updateMask string) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("updateMask", updateMask) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsUpdateCall) Fields(s ...googleapi.Field) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsUpdateCall) Context(ctx context.Context) *StoragePoolsUpdateCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsUpdateCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.storagepool) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.update" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *StoragePoolsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: size_tb and provisioned_iops.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.storagePools.update", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "The storagePool name for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "updateMask": { ++ // "description": "update_mask indicates fields to be updated as part of this request.", ++ // "format": "google-fieldmask", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "request": { ++ // "$ref": "StoragePool" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.subnetworks.aggregatedList": + + type SubnetworksAggregatedListCall struct { +@@ -225652,6 +230322,196 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta + } + } + ++// method id "compute.targetInstances.setSecurityPolicy": ++ ++type TargetInstancesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ targetInstance string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target instance. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - targetInstance: Name of the TargetInstance resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - zone: Name of the zone scoping this request. ++func (r *TargetInstancesService) SetSecurityPolicy(project string, zone string, targetInstance string, securitypolicyreference *SecurityPolicyReference) *TargetInstancesSetSecurityPolicyCall { ++ c := &TargetInstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.targetInstance = targetInstance ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *TargetInstancesSetSecurityPolicyCall) RequestId(requestId string) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetInstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *TargetInstancesSetSecurityPolicyCall) Context(ctx context.Context) *TargetInstancesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetInstancesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetInstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "targetInstance": c.targetInstance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetInstances.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetInstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetInstances.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "targetInstance" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetInstance": { ++ // "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "Name of the zone scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetInstances.testIamPermissions": + + type TargetInstancesTestIamPermissionsCall struct { +@@ -228062,6 +232922,196 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.targetPools.setSecurityPolicy": ++ ++type TargetPoolsSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ targetPool string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target pool. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - targetPool: Name of the TargetPool resource to which the security ++// policy should be set. The name should conform to RFC1035. ++func (r *TargetPoolsService) SetSecurityPolicy(project string, region string, targetPool string, securitypolicyreference *SecurityPolicyReference) *TargetPoolsSetSecurityPolicyCall { ++ c := &TargetPoolsSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.targetPool = targetPool ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetPoolsSetSecurityPolicyCall) RequestId(requestId string) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetPoolsSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *TargetPoolsSetSecurityPolicyCall) Context(ctx context.Context) *TargetPoolsSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetPoolsSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetPoolsSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "targetPool": c.targetPool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetPools.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetPoolsSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetPools.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "targetPool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetPool": { ++ // "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetPools.testIamPermissions": + + type TargetPoolsTestIamPermissionsCall struct { +diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +index 63a7da87c21..9ed51146330 100644 +--- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +@@ -550,6 +550,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource.", ++ "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.addresses.move", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": 
"string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "request": { ++ "$ref": "RegionAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", +@@ -2457,6 +2507,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "bulkInsert": { ++ "description": "Bulk create a set of disks.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ "httpMethod": "POST", ++ "id": "compute.disks.bulkInsert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ "request": { ++ "$ref": "BulkInsertDiskResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "createSnapshot": { + "description": "Creates a snapshot of a specified persistent disk. 
For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", +@@ -2955,6 +3047,145 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "startAsyncReplication": { ++ "description": "Starts asynchronous replication. Must be invoked on the primary disk.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.startAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "disk" ++ ], ++ "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", ++ "request": { ++ "$ref": "DisksStartAsyncReplicationRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "stopAsyncReplication": { ++ "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.stopAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "disk" ++ ], ++ "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "stopGroupAsyncReplication": { ++ "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.stopGroupAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request. 
This must be the zone of the primary or secondary disks in the consistency group.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ "request": { ++ "$ref": "DisksStopGroupAsyncReplicationResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", +@@ -4827,6 +5058,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource from one project to another project.", ++ "flatPath": "projects/{project}/global/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.globalAddresses.move", ++ "parameterOrder": [ ++ "project", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/addresses/{address}/move", ++ "request": { ++ "$ref": "GlobalAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", +@@ -8437,6 +8710,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use instanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -11274,11 +11548,11 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "setServiceAccount": { +- "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", +- "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + "httpMethod": "POST", +- "id": "compute.instances.setServiceAccount", ++ "id": "compute.instances.setSecurityPolicy", + "parameterOrder": [ + "project", + "zone", +@@ -11286,9 +11560,8 @@ + ], + "parameters": { + "instance": { +- "description": "Name of the instance resource to start.", ++ "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -11305,16 +11578,16 @@ + "type": "string" + }, + "zone": { +- "description": "The name of the zone for this request.", ++ "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + "request": { +- "$ref": "InstancesSetServiceAccountRequest" ++ "$ref": "InstancesSetSecurityPolicyRequest" + }, + "response": { + "$ref": "Operation" +@@ -11324,11 +11597,11 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "setShieldedInstanceIntegrityPolicy": { +- "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- "httpMethod": "PATCH", +- "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ "setServiceAccount": { ++ "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "httpMethod": "POST", ++ "id": "compute.instances.setServiceAccount", + "parameterOrder": [ + "project", + "zone", +@@ -11336,7 +11609,57 @@ + ], + "parameters": { + "instance": { +- "description": "Name or id of the instance scoping this request.", ++ "description": "Name of the instance resource to start.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "request": { ++ "$ref": "InstancesSetServiceAccountRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setShieldedInstanceIntegrityPolicy": { ++ "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ "httpMethod": "PATCH", ++ "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instance" ++ ], ++ "parameters": { ++ "instance": { ++ "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -11499,6 +11822,11 @@ + "required": true, + "type": "string" + }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", +@@ -12113,13 +12441,13 @@ + } + } + }, +- "interconnectAttachments": { ++ "instantSnapshots": { + "methods": { + "aggregatedList": { +- "description": "Retrieves an aggregated list of interconnect attachments.", +- "flatPath": "projects/{project}/aggregated/interconnectAttachments", ++ "description": "Retrieves an aggregated list of instantSnapshots.", ++ "flatPath": "projects/{project}/aggregated/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.aggregatedList", ++ "id": "compute.instantSnapshots.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -12165,9 +12493,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/aggregated/interconnectAttachments", ++ "path": "projects/{project}/aggregated/instantSnapshots", + "response": { +- "$ref": "InterconnectAttachmentAggregatedList" ++ "$ref": "InstantSnapshotAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12176,18 +12504,18 @@ + ] + }, + "delete": { +- "description": "Deletes the specified interconnect attachment.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "httpMethod": "DELETE", +- "id": "compute.interconnectAttachments.delete", ++ "id": "compute.instantSnapshots.delete", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "instantSnapshot" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to delete.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12200,20 +12528,70 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "export": { ++ "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.export", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instantSnapshot" ++ ], ++ "parameters": { ++ "instantSnapshot": { ++ "description": "Name of the instant snapshot to export.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ "request": { ++ "$ref": "InstantSnapshotsExportRequest" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -12223,18 +12601,18 @@ + ] + }, + "get": { +- "description": "Returns the specified interconnect attachment.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "description": "Returns the specified InstantSnapshot resource in the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.get", ++ "id": "compute.instantSnapshots.get", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "instantSnapshot" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to return.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12247,17 +12625,17 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "response": { +- "$ref": "InterconnectAttachment" ++ "$ref": "InstantSnapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12265,16 +12643,23 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", +- "httpMethod": "POST", +- "id": "compute.interconnectAttachments.insert", ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.instantSnapshots.getIamPolicy", + "parameterOrder": [ + "project", +- "region" ++ "zone", ++ "resource" + ], + "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12282,27 +12667,64 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates an instant snapshot in the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.insert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "validateOnly": { +- "description": "If true, the request will not be committed.", +- "location": "query", +- "type": "boolean" ++ "zone": { ++ "description": "Name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots", + "request": { +- "$ref": "InterconnectAttachment" ++ "$ref": "InstantSnapshot" + }, + "response": { + "$ref": "Operation" +@@ -12313,13 +12735,13 @@ + ] + }, + "list": { +- "description": "Retrieves the list of interconnect attachments contained within the specified region.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ "description": "Retrieves the list of InstantSnapshot resources contained within the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.list", ++ "id": "compute.instantSnapshots.list", + "parameterOrder": [ + "project", +- "region" ++ "zone" + ], + "parameters": { + "filter": { +@@ -12352,22 +12774,22 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots", + "response": { +- "$ref": "InterconnectAttachmentList" ++ "$ref": "InstantSnapshotList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12375,24 +12797,17 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "patch": { +- "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", +- "httpMethod": "PATCH", +- "id": "compute.interconnectAttachments.patch", ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.setIamPolicy", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "resource" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to patch.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12400,25 +12815,27 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- "location": "query", ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", + "request": { +- "$ref": "InterconnectAttachment" ++ "$ref": "ZoneSetPolicyRequest" + }, + "response": { +- "$ref": "Operation" ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12426,13 +12843,13 @@ + ] + }, + "setLabels": { +- "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ "description": "Sets the labels on a instantSnapshot in the given zone. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.interconnectAttachments.setLabels", ++ "id": "compute.instantSnapshots.setLabels", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +@@ -12443,13 +12860,6 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The region for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +@@ -12461,11 +12871,18 @@ + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + "request": { +- "$ref": "RegionSetLabelsRequest" ++ "$ref": "ZoneSetLabelsRequest" + }, + "response": { + "$ref": "Operation" +@@ -12477,12 +12894,12 @@ + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.interconnectAttachments.testIamPermissions", ++ "id": "compute.instantSnapshots.testIamPermissions", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +@@ -12493,22 +12910,22 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The name of the region for this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -12523,48 +12940,13 @@ + } + } + }, +- "interconnectLocations": { ++ "interconnectAttachments": { + "methods": { +- "get": { +- "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", +- "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- "httpMethod": "GET", +- "id": "compute.interconnectLocations.get", +- "parameterOrder": [ +- "project", +- "interconnectLocation" +- ], +- "parameters": { +- "interconnectLocation": { +- "description": "Name of the interconnect location to return.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" +- }, +- "project": { +- "description": "Project ID for this request.", +- "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- "required": true, +- "type": "string" +- } +- }, +- "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- "response": { +- "$ref": "InterconnectLocation" +- }, +- "scopes": [ +- "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" +- ] +- }, +- "list": { +- "description": "Retrieves the list of interconnect locations available to the specified project.", +- "flatPath": "projects/{project}/global/interconnectLocations", ++ "aggregatedList": { ++ "description": "Retrieves an aggregated list of interconnect attachments.", ++ "flatPath": "projects/{project}/aggregated/interconnectAttachments", + "httpMethod": "GET", +- "id": "compute.interconnectLocations.list", ++ "id": "compute.interconnectAttachments.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -12574,6 +12956,11 @@ + "location": "query", + "type": "string" + }, ++ "includeAllScopes": { ++ "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ "location": "query", ++ "type": "boolean" ++ }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", +@@ -12605,32 +12992,29 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnectLocations", ++ "path": "projects/{project}/aggregated/interconnectAttachments", + "response": { +- "$ref": "InterconnectLocationList" ++ "$ref": "InterconnectAttachmentAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] +- } +- } +- }, +- "interconnects": { +- "methods": { ++ }, + "delete": { +- "description": "Deletes the specified Interconnect.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Deletes the specified interconnect attachment.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "DELETE", +- "id": "compute.interconnects.delete", ++ "id": "compute.interconnectAttachments.delete", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to delete.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12643,13 +13027,20 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { + "$ref": "Operation" + }, +@@ -12659,17 +13050,18 @@ + ] + }, + "get": { +- "description": "Returns the specified Interconnect. 
Get a list of available Interconnects by making a list() request.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Returns the specified interconnect attachment.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "GET", +- "id": "compute.interconnects.get", ++ "id": "compute.interconnectAttachments.get", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to return.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12681,46 +13073,18 @@ + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" +- } +- }, +- "path": "projects/{project}/global/interconnects/{interconnect}", +- "response": { +- "$ref": "Interconnect" +- }, +- "scopes": [ +- "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" +- ] +- }, +- "getDiagnostics": { +- "description": "Returns the interconnectDiagnostics for the specified Interconnect.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", +- "httpMethod": "GET", +- "id": "compute.interconnects.getDiagnostics", +- "parameterOrder": [ +- "project", +- "interconnect" +- ], +- "parameters": { +- "interconnect": { +- "description": "Name of the interconnect resource to query.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" + }, +- "project": { +- "description": "Project ID for this request.", ++ "region": { ++ "description": "Name of the region for this request.", + "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { +- "$ref": "InterconnectsGetDiagnosticsResponse" ++ "$ref": "InterconnectAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12729,12 +13093,13 @@ + ] + }, + "insert": { +- "description": "Creates an Interconnect in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/global/interconnects", ++ "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + "httpMethod": "POST", +- "id": "compute.interconnects.insert", ++ "id": "compute.interconnectAttachments.insert", + "parameterOrder": [ +- "project" ++ "project", ++ "region" + ], + "parameters": { + "project": { +@@ -12744,15 +13109,27 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": 
"string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnects", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments", + "request": { +- "$ref": "Interconnect" ++ "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" +@@ -12763,12 +13140,13 @@ + ] + }, + "list": { +- "description": "Retrieves the list of Interconnects available to the specified project.", +- "flatPath": "projects/{project}/global/interconnects", ++ "description": "Retrieves the list of interconnect attachments contained within the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + "httpMethod": "GET", +- "id": "compute.interconnects.list", ++ "id": "compute.interconnectAttachments.list", + "parameterOrder": [ +- "project" ++ "project", ++ "region" + ], + "parameters": { + "filter": { +@@ -12801,15 +13179,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnects", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments", + "response": { +- "$ref": "InterconnectList" ++ "$ref": "InterconnectAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12818,17 +13203,18 @@ + ] + }, + "patch": { +- "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Updates the specified interconnect attachment with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "PATCH", +- "id": "compute.interconnects.patch", ++ "id": "compute.interconnectAttachments.patch", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to update.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12841,15 +13227,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "request": { +- "$ref": "Interconnect" ++ "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" +@@ -12860,12 +13253,13 @@ + ] + }, + "setLabels": { +- "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", +- "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", ++ "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.interconnects.setLabels", ++ "id": "compute.interconnectAttachments.setLabels", + "parameterOrder": [ + "project", ++ "region", + "resource" + ], + "parameters": { +@@ -12876,17 +13270,29 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "The region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{resource}/setLabels", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "request": { +- "$ref": "GlobalSetLabelsRequest" ++ "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" +@@ -12898,11 +13304,12 @@ + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.interconnects.testIamPermissions", ++ "id": "compute.interconnectAttachments.testIamPermissions", + "parameterOrder": [ + "project", ++ "region", + "resource" + ], + "parameters": { +@@ -12913,15 +13320,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -12936,22 +13350,22 @@ + } + } + }, +- "licenseCodes": { ++ "interconnectLocations": { + "methods": { + "get": { +- "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + "httpMethod": "GET", +- "id": "compute.licenseCodes.get", ++ "id": "compute.interconnectLocations.get", + "parameterOrder": [ + "project", +- "licenseCode" ++ "interconnectLocation" + ], + "parameters": { +- "licenseCode": { +- "description": "Number corresponding to the License code resource to return.", ++ "interconnectLocation": { ++ "description": "Name of the interconnect location to return.", + "location": "path", +- "pattern": "[0-9]{0,61}?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -12963,9 +13377,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + "response": { +- "$ref": "LicenseCode" ++ "$ref": "InterconnectLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12973,16 +13387,38 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- "httpMethod": "POST", +- "id": "compute.licenseCodes.testIamPermissions", ++ "list": { ++ "description": "Retrieves the list of interconnect locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectLocations.list", + "parameterOrder": [ +- "project", +- "resource" ++ "project" + ], + "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12990,20 +13426,109 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectLocations", ++ "response": { ++ "$ref": "InterconnectLocationList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "interconnectRemoteLocations": { ++ "methods": { ++ "get": { ++ "description": "Returns the details for the specified interconnect remote location. 
Gets a list of available interconnect remote locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.get", ++ "parameterOrder": [ ++ "project", ++ "interconnectRemoteLocation" ++ ], ++ "parameters": { ++ "interconnectRemoteLocation": { ++ "description": "Name of the interconnect remote location to return.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- "request": { +- "$ref": "TestPermissionsRequest" ++ "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "response": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } + }, ++ "path": "projects/{project}/global/interconnectRemoteLocations", + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "InterconnectRemoteLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13013,20 +13538,20 @@ + } + } + }, +- "licenses": { ++ "interconnects": { + "methods": { + "delete": { +- "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses/{license}", ++ "description": "Deletes the specified Interconnect.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", +- "id": "compute.licenses.delete", ++ "id": "compute.interconnects.delete", + "parameterOrder": [ + "project", +- "license" ++ "interconnect" + ], + "parameters": { +- "license": { +- "description": "Name of the license resource to delete.", ++ "interconnect": { ++ "description": "Name of the interconnect to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -13045,7 +13570,7 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{license}", ++ "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { + "$ref": "Operation" + }, +@@ -13055,17 +13580,17 @@ + ] + }, + "get": { +- "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{license}", ++ "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", +- "id": "compute.licenses.get", ++ "id": "compute.interconnects.get", + "parameterOrder": [ + "project", +- "license" ++ "interconnect" + ], + "parameters": { +- "license": { +- "description": "Name of the License resource to return.", ++ "interconnect": { ++ "description": "Name of the interconnect to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -13079,9 +13604,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{license}", ++ "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { +- "$ref": "License" ++ "$ref": "Interconnect" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13089,40 +13614,34 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "getIamPolicy": { +- "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "getDiagnostics": { ++ "description": "Returns the interconnectDiagnostics for the specified Interconnect.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "httpMethod": "GET", +- "id": "compute.licenses.getIamPolicy", ++ "id": "compute.interconnects.getDiagnostics", + "parameterOrder": [ + "project", +- "resource" ++ "interconnect" + ], + "parameters": { +- "optionsRequestedPolicyVersion": { +- "description": "Requested IAM Policy version.", +- "format": "int32", +- "location": "query", +- "type": "integer" +- }, +- "project": { +- "description": "Project ID for this request.", ++ "interconnect": { ++ "description": "Name of the interconnect resource to query.", + "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "project": { ++ "description": "Project ID for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "response": { +- "$ref": "Policy" ++ "$ref": "InterconnectsGetDiagnosticsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13131,10 +13650,10 @@ + ] + }, + "insert": { +- "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses", ++ "description": "Creates an Interconnect in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "POST", +- "id": "compute.licenses.insert", ++ "id": "compute.interconnects.insert", + "parameterOrder": [ + "project" + ], +@@ -13152,26 +13671,23 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses", ++ "path": "projects/{project}/global/interconnects", + "request": { +- "$ref": "License" ++ "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/devstorage.full_control", +- "https://www.googleapis.com/auth/devstorage.read_only", +- "https://www.googleapis.com/auth/devstorage.read_write" ++ "https://www.googleapis.com/auth/compute" + ] + }, + "list": { +- "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses", ++ "description": "Retrieves the list of Interconnects available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "GET", +- "id": "compute.licenses.list", ++ "id": "compute.interconnects.list", + "parameterOrder": [ + "project" + ], +@@ -13212,9 +13728,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/global/licenses", ++ "path": "projects/{project}/global/interconnects", + "response": { +- "$ref": "LicensesListResponse" ++ "$ref": "InterconnectList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13222,11 +13738,53 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "setIamPolicy": { +- "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "patch": { ++ "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "httpMethod": "PATCH", ++ "id": "compute.interconnects.patch", ++ "parameterOrder": [ ++ "project", ++ "interconnect" ++ ], ++ "parameters": { ++ "interconnect": { ++ "description": "Name of the interconnect to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/interconnects/{interconnect}", ++ "request": { ++ "$ref": "Interconnect" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setLabels": { ++ "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.licenses.setIamPolicy", ++ "id": "compute.interconnects.setLabels", + "parameterOrder": [ + "project", + "resource" +@@ -13247,12 +13805,12 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "path": "projects/{project}/global/interconnects/{resource}/setLabels", + "request": { +- "$ref": "GlobalSetPolicyRequest" ++ "$ref": "GlobalSetLabelsRequest" + }, + "response": { +- "$ref": "Policy" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13260,10 +13818,10 @@ + ] + }, + "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.licenses.testIamPermissions", ++ "id": "compute.interconnects.testIamPermissions", + "parameterOrder": [ + "project", + "resource" +@@ -13284,7 +13842,7 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -13299,22 +13857,22 @@ + } + } + }, +- "machineImages": { ++ "licenseCodes": { + "methods": { +- "delete": { +- "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", +- "flatPath": "projects/{project}/global/machineImages/{machineImage}", +- "httpMethod": "DELETE", +- "id": "compute.machineImages.delete", ++ "get": { ++ "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "httpMethod": "GET", ++ "id": "compute.licenseCodes.get", + "parameterOrder": [ + "project", +- "machineImage" ++ "licenseCode" + ], + "parameters": { +- "machineImage": { +- "description": "The name of the machine image to delete.", ++ "licenseCode": { ++ "description": "Number corresponding to the License code resource to return.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[0-9]{0,61}?", + "required": true, + "type": "string" + }, +@@ -13324,7 +13882,370 @@ + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" +- }, ++ } ++ }, ++ "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "response": { ++ "$ref": "LicenseCode" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.licenseCodes.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "licenses": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{license}", ++ "httpMethod": "DELETE", ++ "id": "compute.licenses.delete", ++ "parameterOrder": [ ++ "project", ++ "license" ++ ], ++ "parameters": { ++ "license": { ++ "description": "Name of the license resource to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{license}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ "flatPath": "projects/{project}/global/licenses/{license}", ++ "httpMethod": "GET", ++ "id": "compute.licenses.get", ++ "parameterOrder": [ ++ "project", ++ "license" ++ ], ++ "parameters": { ++ "license": { ++ "description": "Name of the License resource to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{license}", ++ "response": { ++ "$ref": "License" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.licenses.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses", ++ "httpMethod": "POST", ++ "id": "compute.licenses.insert", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses", ++ "request": { ++ "$ref": "License" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/devstorage.full_control", ++ "https://www.googleapis.com/auth/devstorage.read_only", ++ "https://www.googleapis.com/auth/devstorage.read_write" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses", ++ "httpMethod": "GET", ++ "id": "compute.licenses.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/licenses", ++ "response": { ++ "$ref": "LicensesListResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.licenses.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "GlobalSetPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.licenses.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "machineImages": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", ++ "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ "httpMethod": "DELETE", ++ "id": "compute.machineImages.delete", ++ "parameterOrder": [ ++ "project", ++ "machineImage" ++ ], ++ "parameters": { ++ "machineImage": { ++ "description": "The name of the machine image to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +@@ -19994,6 +20915,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.regionBackendServices.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "backendService" ++ ], ++ "parameters": { ++ "backendService": { ++ "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", +@@ -20623,6 +21593,48 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "bulkInsert": { ++ "description": "Bulk create a set of disks.", ++ "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.bulkInsert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/disks/bulkInsert", ++ "request": { ++ "$ref": "BulkInsertDiskResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "createSnapshot": { + "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", +@@ -21116,17 +22128,24 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "startAsyncReplication": { ++ "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "httpMethod": "POST", +- "id": "compute.regionDisks.testIamPermissions", ++ "id": "compute.regionDisks.startAsyncReplication", + "parameterOrder": [ + "project", + "region", +- "resource" ++ "disk" + ], + "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21141,32 +22160,29 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "request": { +- "$ref": "TestPermissionsRequest" ++ "$ref": "RegionDisksStartAsyncReplicationRequest" + }, + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "update": { +- "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", +- "flatPath": "projects/{project}/regions/{region}/disks/{disk}", +- "httpMethod": "PATCH", +- "id": "compute.regionDisks.update", ++ "stopAsyncReplication": { ++ "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.stopAsyncReplication", + "parameterOrder": [ + "project", + "region", +@@ -21174,17 +22190,12 @@ + ], + "parameters": { + "disk": { +- "description": "The disk name for this request.", ++ "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "paths": { +- "location": "query", +- "repeated": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21203,18 +22214,9 @@ + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" +- }, +- "updateMask": { +- "description": "update_mask indicates fields to be updated as part of this request.", +- "format": "google-fieldmask", +- "location": "query", +- "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/disks/{disk}", +- "request": { +- "$ref": "Disk" +- }, ++ "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", + "response": { + "$ref": "Operation" + }, +@@ -21222,28 +22224,17 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- } +- } +- }, +- "regionHealthCheckServices": { +- "methods": { +- "delete": { +- "description": "Deletes the specified regional HealthCheckService.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "DELETE", +- "id": "compute.regionHealthCheckServices.delete", ++ }, ++ "stopGroupAsyncReplication": { ++ "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.stopGroupAsyncReplication", + "parameterOrder": [ + "project", +- "region", +- "healthCheckService" ++ "region" + ], + "parameters": { +- "healthCheckService": { +- "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", +- "location": "path", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21252,7 +22243,7 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request. 
This must be the region of the primary or secondary disks in the consistency group.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, +@@ -21264,7 +22255,10 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ "request": { ++ "$ref": "DisksStopGroupAsyncReplicationResource" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -21273,23 +22267,17 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "get": { +- "description": "Returns the specified regional HealthCheckService resource.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "GET", +- "id": "compute.regionHealthCheckServices.get", ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.testIamPermissions", + "parameterOrder": [ + "project", + "region", +- "healthCheckService" ++ "resource" + ], + "parameters": { +- "healthCheckService": { +- "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", +- "location": "path", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21298,16 +22286,26 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, + "response": { +- "$ref": "HealthCheckService" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -21315,16 +22313,29 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices", +- "httpMethod": "POST", +- "id": "compute.regionHealthCheckServices.insert", ++ "update": { ++ "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: user_license.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}", ++ "httpMethod": "PATCH", ++ "id": "compute.regionDisks.update", + "parameterOrder": [ + "project", +- "region" ++ "region", ++ "disk" + ], + "parameters": { ++ "disk": { ++ "description": "The disk name for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "paths": { ++ "location": "query", ++ "repeated": true, ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21333,7 +22344,7 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, +@@ -21343,11 +22354,17 @@ + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "updateMask": { ++ "description": "update_mask indicates fields to be updated as part of this request.", ++ "format": "google-fieldmask", ++ "location": "query", ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "path": "projects/{project}/regions/{region}/disks/{disk}", + "request": { +- "$ref": "HealthCheckService" ++ "$ref": "Disk" + }, + "response": { + "$ref": "Operation" +@@ -21356,38 +22373,26 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- }, +- "list": { +- "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices", +- "httpMethod": "GET", +- "id": "compute.regionHealthCheckServices.list", ++ } ++ } ++ }, ++ "regionHealthCheckServices": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified regional HealthCheckService.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "httpMethod": "DELETE", ++ "id": "compute.regionHealthCheckServices.delete", + "parameterOrder": [ + "project", +- "region" ++ "region", ++ "healthCheckService" + ], + "parameters": { +- "filter": { +- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- "location": "query", +- "type": "string" +- }, +- "maxResults": { +- "default": "500", +- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- "format": "uint32", +- "location": "query", +- "minimum": "0", +- "type": "integer" +- }, +- "orderBy": { +- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- "location": "query", +- "type": "string" +- }, +- "pageToken": { +- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- "location": "query", ++ "healthCheckService": { ++ "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", ++ "location": "path", ++ "required": true, + "type": "string" + }, + "project": { +@@ -21404,27 +22409,173 @@ + "required": true, + "type": "string" + }, +- "returnPartialSuccess": { +- "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +- "type": "boolean" ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { +- "$ref": "HealthCheckServicesList" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "patch": { +- "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "get": { ++ "description": "Returns the specified regional HealthCheckService resource.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "PATCH", +- "id": "compute.regionHealthCheckServices.patch", ++ "httpMethod": "GET", ++ "id": "compute.regionHealthCheckServices.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "healthCheckService" ++ ], ++ "parameters": { ++ "healthCheckService": { ++ "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "response": { ++ "$ref": "HealthCheckService" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices", ++ "httpMethod": "POST", ++ "id": "compute.regionHealthCheckServices.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "request": { ++ "$ref": "HealthCheckService" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices", ++ "httpMethod": "GET", ++ "id": "compute.regionHealthCheckServices.list", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "response": { ++ "$ref": "HealthCheckServicesList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "patch": { ++ "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "httpMethod": "PATCH", ++ "id": "compute.regionHealthCheckServices.patch", + "parameterOrder": [ + "project", + "region", +@@ -22737,6 +23888,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -23542,22 +24694,23 @@ + } + } + }, +- "regionNetworkEndpointGroups": { ++ "regionInstantSnapshots": { + "methods": { + "delete": { +- "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "httpMethod": "DELETE", +- "id": "compute.regionNetworkEndpointGroups.delete", ++ "id": "compute.regionInstantSnapshots.delete", + "parameterOrder": [ + "project", + "region", +- "networkEndpointGroup" ++ "instantSnapshot" + ], + "parameters": { +- "networkEndpointGroup": { +- "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to delete.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -23569,8 +24722,9 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "description": "The name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, +@@ -23580,7 +24734,57 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "export": { ++ "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.export", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "instantSnapshot" ++ ], ++ "parameters": { ++ "instantSnapshot": { ++ "description": "Name of the instant snapshot to export.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ "request": { ++ "$ref": "RegionInstantSnapshotsExportRequest" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -23590,19 +24794,20 @@ + ] + }, + "get": { +- "description": "Returns the specified network endpoint group.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "description": "Returns the specified InstantSnapshot resource in the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "httpMethod": "GET", +- "id": "compute.regionNetworkEndpointGroups.get", ++ "id": "compute.regionInstantSnapshots.get", + "parameterOrder": [ + "project", + "region", +- "networkEndpointGroup" ++ "instantSnapshot" + ], + "parameters": { +- "networkEndpointGroup": { +- "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to return.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -23614,15 +24819,65 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "description": "The name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "response": { +- "$ref": "NetworkEndpointGroup" ++ "$ref": "InstantSnapshot" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.regionInstantSnapshots.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -23631,10 +24886,10 @@ + ] + }, + "insert": { +- "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "description": "Creates an instant snapshot in the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + "httpMethod": "POST", +- "id": "compute.regionNetworkEndpointGroups.insert", ++ "id": "compute.regionInstantSnapshots.insert", + "parameterOrder": [ + "project", + "region" +@@ -23648,8 +24903,9 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", ++ "description": "Name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, +@@ -23659,9 +24915,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "path": "projects/{project}/regions/{region}/instantSnapshots", + "request": { +- "$ref": "NetworkEndpointGroup" ++ "$ref": "InstantSnapshot" + }, + "response": { + "$ref": "Operation" +@@ -23672,10 +24928,345 @@ + ] + }, + "list": { +- "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "description": "Retrieves the list of InstantSnapshot resources contained within the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.regionNetworkEndpointGroups.list", ++ "id": "compute.regionInstantSnapshots.list", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots", ++ "response": { ++ "$ref": "InstantSnapshotList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "RegionSetPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setLabels": { ++ "description": "Sets the labels on a instantSnapshot in the given region. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.setLabels", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", ++ "request": { ++ "$ref": "RegionSetLabelsRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "regionNetworkEndpointGroups": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "httpMethod": "DELETE", ++ "id": "compute.regionNetworkEndpointGroups.delete", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkEndpointGroup" ++ ], ++ "parameters": { ++ "networkEndpointGroup": { ++ "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified network endpoint group.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "httpMethod": "GET", ++ "id": "compute.regionNetworkEndpointGroups.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkEndpointGroup" ++ ], ++ "parameters": { ++ "networkEndpointGroup": { ++ "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "response": { ++ "$ref": "NetworkEndpointGroup" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "httpMethod": "POST", ++ "id": "compute.regionNetworkEndpointGroups.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "request": { ++ "$ref": "NetworkEndpointGroup" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "httpMethod": "GET", ++ "id": "compute.regionNetworkEndpointGroups.list", + "parameterOrder": [ + "project", + "region" +@@ -25032,6 +26623,56 @@ + }, + "regionSecurityPolicies": { + "methods": { ++ "addRule": { ++ "description": "Inserts a rule into a security policy.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.addRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ "request": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "delete": { + "description": "Deletes the specified policy.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", +@@ -25122,6 +26763,55 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "getRule": { ++ "description": "Gets a rule at the specified priority.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ "httpMethod": "GET", ++ "id": "compute.regionSecurityPolicies.getRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to get from the security policy.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to which the queried rule belongs.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ "response": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, + "insert": { + "description": "Creates a new policy in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies", +@@ -25281,6 +26971,110 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] ++ }, ++ "patchRule": { ++ "description": "Patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.patchRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to patch.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ "request": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "removeRule": { ++ "description": "Deletes a rule at the specified priority.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.removeRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to remove from the security policy.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ 
"description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] + } + } + }, +@@ -33047,15 +34841,15 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "httpMethod": "POST", +- "id": "compute.targetInstances.testIamPermissions", ++ "id": "compute.targetInstances.setSecurityPolicy", + "parameterOrder": [ + "project", + "zone", +- "resource" ++ "targetInstance" + ], + "parameters": { + "project": { +@@ -33065,47 +34859,46 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetInstance": { ++ "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { +- "description": "The name of the zone for this request.", ++ "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", ++ "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "request": { +- "$ref": "TestPermissionsRequest" ++ "$ref": "SecurityPolicyReference" + }, + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] +- } +- } +- }, +- "targetPools": { +- "methods": { +- "addHealthCheck": { +- "description": "Adds health check URLs to a target pool.", +- "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.targetPools.addHealthCheck", ++ "id": "compute.targetInstances.testIamPermissions", + "parameterOrder": [ + "project", +- "region", +- "targetPool" ++ "zone", ++ "resource" + ], + "parameters": { + "project": { +@@ -33115,43 +34908,93 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- "location": "query", +- "type": "string" +- }, +- "targetPool": { +- "description": "Name of the target pool to add a health check to.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ "path": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "request": { +- "$ref": "TargetPoolsAddHealthCheckRequest" ++ "$ref": "TestPermissionsRequest" + }, + "response": { +- "$ref": "Operation" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute" ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" + ] +- }, +- "addInstance": { +- "description": "Adds an instance to a target pool.", +- "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", ++ } ++ } ++ }, ++ "targetPools": { ++ "methods": { ++ "addHealthCheck": { ++ "description": "Adds health check URLs to a target pool.", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", +- "id": "compute.targetPools.addInstance", ++ "id": "compute.targetPools.addHealthCheck", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the target pool to add a health check to.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ "request": { ++ "$ref": "TargetPoolsAddHealthCheckRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "addInstance": { ++ "description": "Adds an instance to a target pool.", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.addInstance", + "parameterOrder": [ + "project", + "region", +@@ -33654,6 +35497,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target pool. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the TargetPool resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{resource}/testIamPermissions", +@@ -36287,7 +38179,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AWSV4Signature": { +@@ -36730,11 +38622,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -36744,11 +38636,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -36771,13 +38663,16 @@ + "description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. 
If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this access config.", ++ "type": "string" ++ }, + "setPublicPtr": { + "description": "Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.", + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -37575,6 +39470,18 @@ + ], + "type": "string" + }, ++ "savedState": { ++ "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", ++ "enum": [ ++ "DISK_SAVED_STATE_UNSPECIFIED", ++ "PRESERVED" ++ ], ++ "enumDescriptions": [ ++ "*[Default]* Disk state has not been preserved.", ++ "Disk state has been preserved." ++ ], ++ "type": "string" ++ }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" +@@ -37689,6 +39596,13 @@ + "format": "int64", + "type": "string" + }, ++ "replicaZones": { ++ "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" +@@ -38308,7 +40222,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. 
To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -38338,7 +40252,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -39068,6 +40982,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -40117,6 +42038,17 @@ + }, + "type": "object" + }, ++ "BulkInsertDiskResource": { ++ "description": "A transient resource used in compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is only used to process requests and is not persisted.", ++ "id": "BulkInsertDiskResource", ++ "properties": { ++ "sourceConsistencyGroupPolicy": { ++ "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to clone. This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "BulkInsertInstanceResource": { + "description": "A transient resource used in compute.instances.bulkInsert and compute.regionInstances.bulkInsert . This resource is not persisted anywhere, it is used only for processing the requests.", + "id": "BulkInsertInstanceResource", +@@ -40161,6 +42093,10 @@ + "description": "Per-instance properties to be set on individual instances. To be extended in the future.", + "id": "BulkInsertInstanceResourcePerInstanceProperties", + "properties": { ++ "hostname": { ++ "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", ++ "type": "string" ++ }, + "name": { + "description": "This field is only temporary. 
It will be removed. Do not use it.", + "type": "string" +@@ -40376,7 +42312,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -40417,6 +42353,7 @@ + "GENERAL_PURPOSE_N2", + "GENERAL_PURPOSE_N2D", + "GENERAL_PURPOSE_T2D", ++ "GRAPHICS_OPTIMIZED", + "MEMORY_OPTIMIZED", + "MEMORY_OPTIMIZED_M3", + "TYPE_UNSPECIFIED" +@@ -40433,6 +42370,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -41128,6 +43066,17 @@ + ], + "type": "string" + }, ++ "asyncPrimaryDisk": { ++ "$ref": "DiskAsyncReplication", ++ "description": "Disk asynchronously replicated into this disk." ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskAsyncReplicationList" ++ }, ++ "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", ++ "type": "object" ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -41273,6 +43222,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "DiskResourceStatus", ++ "description": "[Output Only] Status information for the disk resource." ++ }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" +@@ -41286,6 +43239,14 @@ + "format": "int64", + "type": "string" + }, ++ "sourceConsistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, ++ "sourceConsistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" +@@ -41306,6 +43267,14 @@ + "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "type": "string" + }, ++ "sourceInstantSnapshot": { ++ "description": "The source instant snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instantSnapshots/instantSnapshot - projects/project/zones/zone/instantSnapshots/instantSnapshot - zones/zone/instantSnapshots/instantSnapshot ", ++ "type": "string" ++ }, ++ "sourceInstantSnapshotId": { ++ "description": "[Output Only] The unique ID of the instant snapshot used to create this disk. This value identifies the exact instant snapshot that was used to create this persistent disk. 
For example, if you created the persistent disk from an instant snapshot that was later deleted and recreated under the same name, the source instant snapshot ID would identify the exact version of the instant snapshot that was used.", ++ "type": "string" ++ }, + "sourceSnapshot": { + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project /global/snapshots/snapshot - projects/project/global/snapshots/snapshot - global/snapshots/snapshot ", + "type": "string" +@@ -41504,6 +43473,37 @@ + }, + "type": "object" + }, ++ "DiskAsyncReplication": { ++ "id": "DiskAsyncReplication", ++ "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "disk": { ++ "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", ++ "type": "string" ++ }, ++ "diskId": { ++ "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskAsyncReplicationList": { ++ "id": "DiskAsyncReplicationList", ++ "properties": { ++ "asyncReplicationDisk": { ++ "$ref": "DiskAsyncReplication" ++ } ++ }, ++ "type": "object" ++ }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", +@@ -41693,6 +43693,47 @@ + }, + "type": "object" + }, ++ "DiskResourceStatus": { ++ "id": "DiskResourceStatus", ++ "properties": { ++ "asyncPrimaryDisk": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "description": "Key: disk, value: AsyncReplicationStatus message", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskResourceStatusAsyncReplicationStatus": { ++ "id": "DiskResourceStatusAsyncReplicationStatus", ++ "properties": { ++ "state": { ++ "enum": [ ++ "ACTIVE", ++ "CREATED", ++ "STARTING", ++ "STATE_UNSPECIFIED", ++ "STOPPED", ++ "STOPPING" ++ ], ++ "enumDescriptions": [ ++ "Replication is active.", ++ "Secondary disk is created and is waiting for replication to start.", ++ "Replication is starting.", ++ "", ++ "Replication is stopped.", ++ "Replication is stopping." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DiskType": { + "description": "Represents a Disk Type resource. 
Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/beta/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/beta/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", + "id": "DiskType", +@@ -42236,6 +44277,27 @@ + }, + "type": "object" + }, ++ "DisksStartAsyncReplicationRequest": { ++ "id": "DisksStartAsyncReplicationRequest", ++ "properties": { ++ "asyncSecondaryDisk": { ++ "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DisksStopGroupAsyncReplicationResource": { ++ "description": "A transient resource used in compute.disks.stopGroupAsyncReplication and compute.regionDisks.stopGroupAsyncReplication. It is only used to process requests and is not persisted.", ++ "id": "DisksStopGroupAsyncReplicationResource", ++ "properties": { ++ "resourcePolicy": { ++ "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to stop. This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DisplayDevice": { + "description": "A set of Display Device options", + "id": "DisplayDevice", +@@ -42594,6 +44656,10 @@ + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", + "type": "string" ++ }, ++ "ipv6Address": { ++ "description": "IPv6 address of the interface in the external VPN gateway. This IPv6 address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. Must specify an IPv6 address (not IPV4-mapped) using any format described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0).", ++ "type": "string" + } + }, + "type": "object" +@@ -43337,6 +45403,10 @@ + "format": "int32", + "type": "integer" + }, ++ "securityProfileGroup": { ++ "description": "A fully-qualified URL of a SecurityProfile resource instance. Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", ++ "type": "string" ++ }, + "targetResources": { + "description": "A list of network resource URLs to which this rule applies. 
This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + "items": { +@@ -43357,6 +45427,10 @@ + "type": "string" + }, + "type": "array" ++ }, ++ "tlsInspect": { ++ "description": "Boolean flag indicating if the traffic should be TLS decrypted. Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", ++ "type": "boolean" + } + }, + "type": "object" +@@ -43650,7 +45724,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -44160,6 +46234,20 @@ + }, + "type": "object" + }, ++ "GlobalAddressesMoveRequest": { ++ "id": "GlobalAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { +@@ -44317,13 +46405,14 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "SEV_CAPABLE", ++ "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", +@@ -44338,6 +46427,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -44505,7 +46595,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/beta/healthChecks) * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/beta/healthChecks) * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -45212,7 +47302,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -45285,10 +47375,26 @@ + "UNKNOWN" + ], + "enumDescriptions": [ +- "", +- "", +- "", +- "" ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." ++ ], ++ "type": "string" ++ }, ++ "ipv6HealthState": { ++ "description": "Health state of the ipv6 network endpoint determined based on the health checks configured.", ++ "enum": [ ++ "DRAINING", ++ "HEALTHY", ++ "UNHEALTHY", ++ "UNKNOWN" ++ ], ++ "enumDescriptions": [ ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." 
+ ], + "type": "string" + } +@@ -46759,7 +48865,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -47655,7 +49761,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "format": "int32", + "type": "integer" + } +@@ -48064,7 +50170,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -49647,193 +51753,819 @@ + }, + "type": "object" + }, +- "InstanceWithNamedPorts": { +- "id": "InstanceWithNamedPorts", +- "properties": { +- "instance": { +- "description": "[Output Only] The URL of the instance.", +- "type": "string" +- }, +- "namedPorts": { +- "description": "[Output Only] The named ports that belong to this instance group.", +- "items": { +- "$ref": "NamedPort" +- }, +- "type": "array" +- }, +- "status": { +- "description": "[Output Only] The status of the instance.", +- "enum": [ +- "DEPROVISIONING", +- "PROVISIONING", +- "REPAIRING", +- "RUNNING", +- "STAGING", +- "STOPPED", +- "STOPPING", +- "SUSPENDED", +- "SUSPENDING", +- "TERMINATED" +- ], +- "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", +- "Resources are being allocated for the instance.", +- "The instance is in repair.", +- "The instance is running.", +- "All required resources have been allocated and the instance is being started.", +- "The instance has stopped successfully.", +- "The instance is currently stopping (either being deleted or killed).", +- "The instance has suspended.", +- "The instance is suspending.", +- "The instance has stopped (either by explicit action or underlying failure)." 
+- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesAddResourcePoliciesRequest": { +- "id": "InstancesAddResourcePoliciesRequest", +- "properties": { +- "resourcePolicies": { +- "description": "Resource policies to be added to this instance.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponse": { +- "id": "InstancesGetEffectiveFirewallsResponse", +- "properties": { +- "firewallPolicys": { +- "description": "Effective firewalls from firewall policies.", +- "items": { +- "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" +- }, +- "type": "array" +- }, +- "firewalls": { +- "description": "Effective firewalls on the instance.", +- "items": { +- "$ref": "Firewall" +- }, +- "type": "array" +- }, +- "organizationFirewalls": { +- "description": "Effective firewalls from organization policies.", +- "items": { +- "$ref": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { +- "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", +- "properties": { +- "displayName": { +- "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy.", +- "type": "string" +- }, +- "name": { +- "description": "[Output Only] The name of the firewall policy.", +- "type": "string" +- }, +- "rules": { +- "description": "The rules that apply to the network.", +- "items": { +- "$ref": "FirewallPolicyRule" +- }, +- "type": "array" +- }, +- "shortName": { +- "description": "[Output Only] The short name of the firewall policy.", +- "type": "string" +- }, +- "type": { +- "description": "[Output Only] The type of the firewall policy. 
Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", +- "enum": [ +- "HIERARCHY", +- "NETWORK", +- "NETWORK_REGIONAL", +- "UNSPECIFIED" +- ], +- "enumDescriptions": [ +- "", +- "", +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { +- "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", +- "id": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy", ++ "InstanceWithNamedPorts": { ++ "id": "InstanceWithNamedPorts", ++ "properties": { ++ "instance": { ++ "description": "[Output Only] The URL of the instance.", ++ "type": "string" ++ }, ++ "namedPorts": { ++ "description": "[Output Only] The named ports that belong to this instance group.", ++ "items": { ++ "$ref": "NamedPort" ++ }, ++ "type": "array" ++ }, ++ "status": { ++ "description": "[Output Only] The status of the instance.", ++ "enum": [ ++ "DEPROVISIONING", ++ "PROVISIONING", ++ "REPAIRING", ++ "RUNNING", ++ "STAGING", ++ "STOPPED", ++ "STOPPING", ++ "SUSPENDED", ++ "SUSPENDING", ++ "TERMINATED" ++ ], ++ "enumDescriptions": [ ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "Resources are being allocated for the instance.", ++ "The instance is in repair.", ++ "The instance is running.", ++ "All required resources have been allocated and the instance is being started.", ++ "The instance has stopped successfully.", ++ "The instance is currently stopping (either being deleted or killed).", ++ "The instance has suspended.", ++ "The instance is suspending.", ++ "The instance has stopped (either by explicit action or underlying failure)." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesAddResourcePoliciesRequest": { ++ "id": "InstancesAddResourcePoliciesRequest", ++ "properties": { ++ "resourcePolicies": { ++ "description": "Resource policies to be added to this instance.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponse": { ++ "id": "InstancesGetEffectiveFirewallsResponse", ++ "properties": { ++ "firewallPolicys": { ++ "description": "Effective firewalls from firewall policies.", ++ "items": { ++ "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" ++ }, ++ "type": "array" ++ }, ++ "firewalls": { ++ "description": "Effective firewalls on the instance.", ++ "items": { ++ "$ref": "Firewall" ++ }, ++ "type": "array" ++ }, ++ "organizationFirewalls": { ++ "description": "Effective firewalls from organization policies.", ++ "items": { ++ "$ref": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { ++ "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", ++ "properties": { ++ "displayName": { ++ "description": "[Output Only] Deprecated, please use short name instead. 
The display name of the firewall policy.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "[Output Only] The name of the firewall policy.", ++ "type": "string" ++ }, ++ "rules": { ++ "description": "The rules that apply to the network.", ++ "items": { ++ "$ref": "FirewallPolicyRule" ++ }, ++ "type": "array" ++ }, ++ "shortName": { ++ "description": "[Output Only] The short name of the firewall policy.", ++ "type": "string" ++ }, ++ "type": { ++ "description": "[Output Only] The type of the firewall policy. Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", ++ "enum": [ ++ "HIERARCHY", ++ "NETWORK", ++ "NETWORK_REGIONAL", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { ++ "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", ++ "id": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy", ++ "properties": { ++ "id": { ++ "description": "The unique identifier for the security policy. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "rules": { ++ "description": "The rules that apply to the network.", ++ "items": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesRemoveResourcePoliciesRequest": { ++ "id": "InstancesRemoveResourcePoliciesRequest", ++ "properties": { ++ "resourcePolicies": { ++ "description": "Resource policies to be removed from this instance.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesResumeRequest": { ++ "id": "InstancesResumeRequest", ++ "properties": { ++ "disks": { ++ "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to resume the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", ++ "items": { ++ "$ref": "CustomerEncryptionKeyProtectedDisk" ++ }, ++ "type": "array" ++ }, ++ "instanceEncryptionKey": { ++ "$ref": "CustomerEncryptionKey", ++ "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key. If the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesScopedList": { ++ "id": "InstancesScopedList", ++ "properties": { ++ "instances": { ++ "description": "[Output Only] A list of instances contained in this scope.", ++ "items": { ++ "$ref": "Instance" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetLabelsRequest": { ++ "id": "InstancesSetLabelsRequest", ++ "properties": { ++ "labelFingerprint": { ++ "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMachineResourcesRequest": { ++ "id": "InstancesSetMachineResourcesRequest", ++ "properties": { ++ "guestAccelerators": { ++ "description": "A list of the type and count of accelerator cards attached to the instance.", ++ "items": { ++ "$ref": "AcceleratorConfig" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMachineTypeRequest": { ++ "id": "InstancesSetMachineTypeRequest", ++ "properties": { ++ "machineType": { ++ "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMinCpuPlatformRequest": { ++ "id": "InstancesSetMinCpuPlatformRequest", ++ "properties": { ++ "minCpuPlatform": { ++ "description": "Minimum cpu/platform this instance should be started at.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetNameRequest": { ++ "id": "InstancesSetNameRequest", ++ "properties": { ++ "currentName": { ++ "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetSecurityPolicyRequest": { ++ "id": "InstancesSetSecurityPolicyRequest", ++ "properties": { ++ "networkInterfaces": { ++ "description": "The network interfaces that the security policy will be applied to. Network interfaces use the nicN naming format. You can only set a security policy for network interfaces with an access config.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "securityPolicy": { ++ "description": "A full or partial URL to a security policy to add to this instance. 
If this field is set to an empty string it will remove the associated security policy.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetServiceAccountRequest": { ++ "id": "InstancesSetServiceAccountRequest", ++ "properties": { ++ "email": { ++ "description": "Email address of the service account.", ++ "type": "string" ++ }, ++ "scopes": { ++ "description": "The list of scopes to be made available for this service account.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesStartWithEncryptionKeyRequest": { ++ "id": "InstancesStartWithEncryptionKeyRequest", ++ "properties": { ++ "disks": { ++ "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", ++ "items": { ++ "$ref": "CustomerEncryptionKeyProtectedDisk" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshot": { ++ "description": "Represents a InstantSnapshot resource. You can use instant snapshots to create disk rollback points quickly..", ++ "id": "InstantSnapshot", ++ "properties": { ++ "architecture": { ++ "description": "[Output Only] The architecture of the instant snapshot. Valid values are ARM64 or X86_64.", ++ "enum": [ ++ "ARCHITECTURE_UNSPECIFIED", ++ "ARM64", ++ "X86_64" ++ ], ++ "enumDescriptions": [ ++ "Default value indicating Architecture is not set.", ++ "Machines with architecture ARM64", ++ "Machines with architecture X86_64" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "diskSizeGb": { ++ "description": "[Output Only] Size of the source disk, specified in GB.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshot", ++ "description": "[Output Only] Type of the resource. Always compute#instantSnapshot for InstantSnapshot resources.", ++ "type": "string" ++ }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this InstantSnapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a InstantSnapshot.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels to apply to this InstantSnapshot. These can be later modified by the setLabels method. Label values may be empty.", ++ "type": "object" ++ }, ++ "name": { ++ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "region": { ++ "description": "[Output Only] URL of the region where the instant snapshot resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ }, ++ "resourceStatus": { ++ "$ref": "InstantSnapshotResourceStatus", ++ "description": "[Output Only] Status information for the instant snapshot resource." ++ }, ++ "satisfiesPzs": { ++ "description": "[Output Only] Reserved for future use.", ++ "type": "boolean" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource's resource id.", ++ "type": "string" ++ }, ++ "sourceDisk": { ++ "description": "URL of the source disk used to create this instant snapshot. Note that the source disk must be in the same zone/region as the instant snapshot to be created. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ }, ++ "sourceDiskId": { ++ "description": "[Output Only] The ID value of the disk used to create this InstantSnapshot. This value may be used to determine whether the InstantSnapshot was taken from the current or a previous instance of a given disk name.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of the instantSnapshot. This can be CREATING, DELETING, FAILED, or READY.", ++ "enum": [ ++ "CREATING", ++ "DELETING", ++ "FAILED", ++ "READY" ++ ], ++ "enumDescriptions": [ ++ "InstantSnapshot creation is in progress.", ++ "InstantSnapshot is currently being deleted.", ++ "InstantSnapshot creation failed.", ++ "InstantSnapshot has been created successfully." ++ ], ++ "type": "string" ++ }, ++ "zone": { ++ "description": "[Output Only] URL of the zone where the instant snapshot resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotAggregatedList": { ++ "id": "InstantSnapshotAggregatedList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "additionalProperties": { ++ "$ref": "InstantSnapshotsScopedList", ++ "description": "[Output Only] Name of the scope containing this set of instantSnapshots." ++ }, ++ "description": "A list of InstantSnapshotsScopedList resources.", ++ "type": "object" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshotAggregatedList", ++ "description": "[Output Only] Type of resource. 
Always compute#instantSnapshotAggregatedList for aggregated lists of instantSnapshots.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotExportParams": { ++ "id": "InstantSnapshotExportParams", ++ "properties": { ++ "baseInstantSnapshot": { ++ "description": "An optional base instant snapshot that this resource is compared against. If not specified, all blocks of this resource are exported. The base instant snapshot and this resource must be created from the same disk. The base instant snapshot must be created earlier in time than this resource.", ++ "type": "string" ++ }, ++ "bucketName": { ++ "description": "The name of an existing bucket in Cloud Storage where the changed blocks will be stored. The Google Service Account must have read and write access to this bucket. The bucket has to be in the same region as this resource.", ++ "type": "string" ++ }, ++ "encryptionKey": { ++ "$ref": "CustomerEncryptionKey", ++ "description": "Encryption key used to encrypt the instant snapshot." ++ }, ++ "objectName": { ++ "description": "Name of the output Bigstore object storing the changed blocks. 
Object name must be less than 1024 bytes in length.", ++ "type": "string" ++ }, ++ "outputType": { ++ "description": "The format of the output file.", ++ "enum": [ ++ "INVALID", ++ "METADATA_AND_DATA", ++ "METADATA_ONLY" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotList": { ++ "description": "Contains a list of InstantSnapshot resources.", ++ "id": "InstantSnapshotList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InstantSnapshot resources.", ++ "items": { ++ "$ref": "InstantSnapshot" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshotList", ++ "description": "Type of resource.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. 
Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotResourceStatus": { ++ "id": "InstantSnapshotResourceStatus", + "properties": { +- "id": { +- "description": "The unique identifier for the security policy. 
This identifier is defined by the server.", +- "format": "uint64", ++ "storageSizeBytes": { ++ "description": "[Output Only] The storage size of this instant snapshot.", ++ "format": "int64", + "type": "string" +- }, +- "rules": { +- "description": "The rules that apply to the network.", +- "items": { +- "$ref": "SecurityPolicyRule" +- }, +- "type": "array" + } + }, + "type": "object" + }, +- "InstancesRemoveResourcePoliciesRequest": { +- "id": "InstancesRemoveResourcePoliciesRequest", +- "properties": { +- "resourcePolicies": { +- "description": "Resource policies to be removed from this instance.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesResumeRequest": { +- "id": "InstancesResumeRequest", ++ "InstantSnapshotsExportRequest": { ++ "id": "InstantSnapshotsExportRequest", + "properties": { +- "disks": { +- "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to resume the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", +- "items": { +- "$ref": "CustomerEncryptionKeyProtectedDisk" +- }, +- "type": "array" +- }, +- "instanceEncryptionKey": { +- "$ref": "CustomerEncryptionKey", +- "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key. If the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." ++ "exportParams": { ++ "$ref": "InstantSnapshotExportParams", ++ "description": "Parameters to export the changed blocks." + } + }, + "type": "object" + }, +- "InstancesScopedList": { +- "id": "InstancesScopedList", ++ "InstantSnapshotsScopedList": { ++ "id": "InstantSnapshotsScopedList", + "properties": { +- "instances": { +- "description": "[Output Only] A list of instances contained in this scope.", ++ "instantSnapshots": { ++ "description": "[Output Only] A list of instantSnapshots contained in this scope.", + "items": { +- "$ref": "Instance" ++ "$ref": "InstantSnapshot" + }, + "type": "array" + }, + "warning": { +- "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", ++ "description": "[Output Only] Informational warning which replaces the list of instantSnapshots when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +@@ -49924,100 +52656,6 @@ + }, + "type": "object" + }, +- "InstancesSetLabelsRequest": { +- "id": "InstancesSetLabelsRequest", +- "properties": { +- "labelFingerprint": { +- "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. 
Provide the latest fingerprint value when making a request to add or change labels.", +- "format": "byte", +- "type": "string" +- }, +- "labels": { +- "additionalProperties": { +- "type": "string" +- }, +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMachineResourcesRequest": { +- "id": "InstancesSetMachineResourcesRequest", +- "properties": { +- "guestAccelerators": { +- "description": "A list of the type and count of accelerator cards attached to the instance.", +- "items": { +- "$ref": "AcceleratorConfig" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMachineTypeRequest": { +- "id": "InstancesSetMachineTypeRequest", +- "properties": { +- "machineType": { +- "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMinCpuPlatformRequest": { +- "id": "InstancesSetMinCpuPlatformRequest", +- "properties": { +- "minCpuPlatform": { +- "description": "Minimum cpu/platform this instance should be started at.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetNameRequest": { +- "id": "InstancesSetNameRequest", +- "properties": { +- "currentName": { +- "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", +- "type": "string" +- }, +- "name": { +- "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetServiceAccountRequest": { +- "id": "InstancesSetServiceAccountRequest", +- "properties": { +- "email": { +- "description": "Email address of the service account.", +- "type": "string" +- }, +- "scopes": { +- "description": "The list of scopes to be made available for this service account.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesStartWithEncryptionKeyRequest": { +- "id": "InstancesStartWithEncryptionKeyRequest", +- "properties": { +- "disks": { +- "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", +- "items": { +- "$ref": "CustomerEncryptionKeyProtectedDisk" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", +@@ -50036,7 +52674,7 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. 
For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { +@@ -50171,6 +52809,10 @@ + "format": "int32", + "type": "integer" + }, ++ "remoteLocation": { ++ "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", ++ "type": "string" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -50265,6 +52907,10 @@ + "description": "This field is not available.", + "type": "string" + }, ++ "configurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Constraints for this attachment, if any. The attachment does not work if these constraints are not met." ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -50330,7 +52976,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -50396,6 +53042,10 @@ + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, ++ "remoteService": { ++ "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: \"Amazon Web Services\" \"Microsoft Azure\". 
The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", ++ "type": "string" ++ }, + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "type": "string" +@@ -50442,6 +53092,11 @@ + ], + "type": "string" + }, ++ "subnetLength": { ++ "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", ++ "format": "int32", ++ "type": "integer" ++ }, + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", + "enum": [ +@@ -50591,6 +53246,47 @@ + }, + "type": "object" + }, ++ "InterconnectAttachmentConfigurationConstraints": { ++ "id": "InterconnectAttachmentConfigurationConstraints", ++ "properties": { ++ "bgpMd5": { ++ "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", ++ "enum": [ ++ "MD5_OPTIONAL", ++ "MD5_REQUIRED", ++ "MD5_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", ++ "MD5_REQUIRED: BGP MD5 authentication must be configured.", ++ "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" ++ ], ++ "type": "string" ++ }, ++ "bgpPeerAsnRanges": { ++ "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. 
Although the API accepts other ranges, these are the ranges that we recommend.", ++ "items": { ++ "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { ++ "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", ++ "properties": { ++ "max": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "uint32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", +@@ -51374,111 +54070,413 @@ + }, + "type": "object" + }, +- "InterconnectLocationRegionInfo": { +- "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", +- "id": "InterconnectLocationRegionInfo", ++ "InterconnectLocationRegionInfo": { ++ "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", ++ "id": "InterconnectLocationRegionInfo", ++ "properties": { ++ "expectedRttMs": { ++ "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "locationPresence": { ++ "description": "Identifies the network presence of this location.", ++ "enum": [ ++ "GLOBAL", ++ "LOCAL_REGION", ++ "LP_GLOBAL", ++ "LP_LOCAL_REGION" ++ ], ++ "enumDescriptions": [ ++ "This region is not in any common network presence with this InterconnectLocation.", ++ "This region shares the same regional network presence as this InterconnectLocation.", ++ "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", ++ "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." ++ ], ++ "type": "string" ++ }, ++ "region": { ++ "description": "URL for the region of this location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectOutageNotification": { ++ "description": "Description of a planned outage on this Interconnect.", ++ "id": "InterconnectOutageNotification", ++ "properties": { ++ "affectedCircuits": { ++ "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "description": { ++ "description": "A description about the purpose of the outage.", ++ "type": "string" ++ }, ++ "endTime": { ++ "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "issueType": { ++ "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. 
Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", ++ "enum": [ ++ "IT_OUTAGE", ++ "IT_PARTIAL_OUTAGE", ++ "OUTAGE", ++ "PARTIAL_OUTAGE" ++ ], ++ "enumDescriptions": [ ++ "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", ++ "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", ++ "The Interconnect may be completely out of service for some or all of the specified window.", ++ "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." ++ ], ++ "type": "string" ++ }, ++ "name": { ++ "description": "Unique identifier for this outage notification.", ++ "type": "string" ++ }, ++ "source": { ++ "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", ++ "enum": [ ++ "GOOGLE", ++ "NSRC_GOOGLE" ++ ], ++ "enumDescriptions": [ ++ "This notification was generated by Google.", ++ "[Deprecated] This notification was generated by Google." ++ ], ++ "type": "string" ++ }, ++ "startTime": { ++ "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "state": { ++ "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", ++ "enum": [ ++ "ACTIVE", ++ "CANCELLED", ++ "COMPLETED", ++ "NS_ACTIVE", ++ "NS_CANCELED" ++ ], ++ "enumDescriptions": [ ++ "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", ++ "The outage associated with this notification was cancelled before the outage was due to start.", ++ "The outage associated with this notification is complete.", ++ "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", ++ "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocation": { ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. 
You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "id": "InterconnectRemoteLocation", ++ "properties": { ++ "address": { ++ "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", ++ "type": "string" ++ }, ++ "attachmentConfigurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." ++ }, ++ "city": { ++ "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", ++ "type": "string" ++ }, ++ "constraints": { ++ "$ref": "InterconnectRemoteLocationConstraints", ++ "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments." ++ }, ++ "continent": { ++ "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", ++ "enum": [ ++ "AFRICA", ++ "ASIA_PAC", ++ "EUROPE", ++ "NORTH_AMERICA", ++ "SOUTH_AMERICA" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "[Output Only] An optional description of the resource.", ++ "type": "string" ++ }, ++ "facilityProvider": { ++ "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", ++ "type": "string" ++ }, ++ "facilityProviderFacilityId": { ++ "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocation", ++ "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", ++ "type": "string" ++ }, ++ "lacp": { ++ "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", ++ "enum": [ ++ "LACP_SUPPORTED", ++ "LACP_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ ], ++ "type": "string" ++ }, ++ "maxLagSize100Gbps": { ++ "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "maxLagSize10Gbps": { ++ "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). 
When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "name": { ++ "description": "[Output Only] Name of the resource.", ++ "type": "string" ++ }, ++ "peeringdbFacilityId": { ++ "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", ++ "type": "string" ++ }, ++ "permittedConnections": { ++ "description": "[Output Only] Permitted connections.", ++ "items": { ++ "$ref": "InterconnectRemoteLocationPermittedConnections" ++ }, ++ "type": "array" ++ }, ++ "remoteService": { ++ "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects. - AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", ++ "enum": [ ++ "AVAILABLE", ++ "CLOSED" ++ ], ++ "enumDescriptions": [ ++ "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", ++ "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraints": { ++ "id": "InterconnectRemoteLocationConstraints", ++ "properties": { ++ "portPairRemoteLocation": { ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", ++ "enum": [ ++ "PORT_PAIR_MATCHING_REMOTE_LOCATION", ++ "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", ++ "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." ++ ], ++ "type": "string" ++ }, ++ "portPairVlan": { ++ "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", ++ "enum": [ ++ "PORT_PAIR_MATCHING_VLAN", ++ "PORT_PAIR_UNCONSTRAINED_VLAN" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. 
While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", ++ "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." ++ ], ++ "type": "string" ++ }, ++ "subnetLengthRange": { ++ "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. " ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraintsSubnetLengthRange": { ++ "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "properties": { ++ "max": { ++ "format": "int32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationList": { ++ "description": "Response to the list request, and contains a list of interconnect remote locations.", ++ "id": "InterconnectRemoteLocationList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InterconnectRemoteLocation resources.", ++ "items": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocationList", ++ "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", + "properties": { +- "expectedRttMs": { +- "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", +- "format": "int64", +- "type": "string" +- }, +- "locationPresence": { +- "description": "Identifies the network presence of this location.", +- "enum": [ +- "GLOBAL", +- "LOCAL_REGION", +- "LP_GLOBAL", +- "LP_LOCAL_REGION" +- ], +- "enumDescriptions": [ +- "This region is not in any common network presence with this InterconnectLocation.", +- "This region shares the same regional network presence as this InterconnectLocation.", +- "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", +- "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." +- ], +- "type": "string" +- }, +- "region": { +- "description": "URL for the region of this location.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InterconnectOutageNotification": { +- "description": "Description of a planned outage on this Interconnect.", +- "id": "InterconnectOutageNotification", +- "properties": { +- "affectedCircuits": { +- "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "description": { +- "description": "A description about the purpose of the outage.", +- "type": "string" +- }, +- "endTime": { +- "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", +- "format": "int64", +- "type": "string" +- }, +- "issueType": { +- "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", +- "enum": [ +- "IT_OUTAGE", +- "IT_PARTIAL_OUTAGE", +- "OUTAGE", +- "PARTIAL_OUTAGE" +- ], +- "enumDescriptions": [ +- "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", +- "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. 
The interconnect as a whole should remain up, albeit with reduced bandwidth.", +- "The Interconnect may be completely out of service for some or all of the specified window.", +- "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." +- ], +- "type": "string" +- }, +- "name": { +- "description": "Unique identifier for this outage notification.", +- "type": "string" +- }, +- "source": { +- "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", +- "enum": [ +- "GOOGLE", +- "NSRC_GOOGLE" +- ], +- "enumDescriptions": [ +- "This notification was generated by Google.", +- "[Deprecated] This notification was generated by Google." +- ], +- "type": "string" +- }, +- "startTime": { +- "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", +- "format": "int64", +- "type": "string" +- }, +- "state": { +- "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", +- "enum": [ +- "ACTIVE", +- "CANCELLED", +- "COMPLETED", +- "NS_ACTIVE", +- "NS_CANCELED" +- ], +- "enumDescriptions": [ +- "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", +- "The outage associated with this notification was cancelled before the outage was due to start.", +- "The outage associated with this notification is complete.", +- "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", +- "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." +- ], ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", + "type": "string" + } + }, +@@ -52221,7 +55219,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. 
nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -52717,7 +55715,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -52992,7 +55990,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -53099,7 +56097,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -53124,7 +56122,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -53288,7 +56286,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -53296,7 +56294,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -53855,6 +56853,10 @@ + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "type": "string" + }, ++ "ipv6Address": { ++ "description": "Optional IPv6 address of network endpoint.", ++ "type": "string" ++ }, + "port": { + "description": "Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used.", + "format": "int32", +@@ -54746,7 +57748,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. 
This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -59416,6 +62418,7 @@ + "COMMITTED_NVIDIA_A100_80GB_GPUS", + "COMMITTED_NVIDIA_A100_GPUS", + "COMMITTED_NVIDIA_K80_GPUS", ++ "COMMITTED_NVIDIA_L4_GPUS", + "COMMITTED_NVIDIA_P100_GPUS", + "COMMITTED_NVIDIA_P4_GPUS", + "COMMITTED_NVIDIA_T4_GPUS", +@@ -59467,11 +62470,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -59486,6 +62493,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -59510,6 +62518,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -59567,6 +62576,7 @@ + "", + "", + "", ++ "", + "Guest CPUs", + "", + "", +@@ -59659,6 +62669,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -59812,6 +62828,20 @@ + }, + "type": "object" + }, ++ "RegionAddressesMoveRequest": { ++ "id": "RegionAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. So /regions/region/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionAutoscalerList": { + "description": "Contains a list of autoscalers.", + "id": "RegionAutoscalerList", +@@ -60101,6 +63131,16 @@ + }, + "type": "object" + }, ++ "RegionDisksStartAsyncReplicationRequest": { ++ "id": "RegionDisksStartAsyncReplicationRequest", ++ "properties": { ++ "asyncSecondaryDisk": { ++ "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. 
For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionInstanceGroupList": { + "description": "Contains a list of InstanceGroup resources.", + "id": "RegionInstanceGroupList", +@@ -60845,6 +63885,16 @@ + }, + "type": "object" + }, ++ "RegionInstantSnapshotsExportRequest": { ++ "id": "RegionInstantSnapshotsExportRequest", ++ "properties": { ++ "exportParams": { ++ "$ref": "InstantSnapshotExportParams", ++ "description": "Parameters to export the changed blocks." ++ } ++ }, ++ "type": "object" ++ }, + "RegionList": { + "description": "Contains a list of region resources.", + "id": "RegionList", +@@ -61741,6 +64791,10 @@ + "description": { + "type": "string" + }, ++ "diskConsistencyGroupPolicy": { ++ "$ref": "ResourcePolicyDiskConsistencyGroupPolicy", ++ "description": "Resource policy for disk consistency groups." ++ }, + "groupPlacementPolicy": { + "$ref": "ResourcePolicyGroupPlacementPolicy", + "description": "Resource policy for instances for placement configuration." +@@ -61956,6 +65010,12 @@ + }, + "type": "object" + }, ++ "ResourcePolicyDiskConsistencyGroupPolicy": { ++ "description": "Resource policy for disk consistency groups.", ++ "id": "ResourcePolicyDiskConsistencyGroupPolicy", ++ "properties": {}, ++ "type": "object" ++ }, + "ResourcePolicyGroupPlacementPolicy": { + "description": "A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality", + "id": "ResourcePolicyGroupPlacementPolicy", +@@ -63095,6 +66155,18 @@ + "$ref": "RouterBgpPeerBfd", + "description": "BFD configuration for the BGP peering." + }, ++ "customLearnedIpRanges": { ++ "description": "A list of user-defined custom learned route IP address ranges for a BGP session.", ++ "items": { ++ "$ref": "RouterBgpPeerCustomLearnedIpRange" ++ }, ++ "type": "array" ++ }, ++ "customLearnedRoutePriority": { ++ "description": "The user-defined custom learned route priority for a BGP session. This value is applied to all custom learned route ranges for the session. You can choose a value from `0` to `65335`. If you don't provide a value, Google Cloud assigns a priority of `100` to the ranges.", ++ "format": "int32", ++ "type": "integer" ++ }, + "enable": { + "description": "The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE.", + "enum": [ +@@ -63209,6 +66281,16 @@ + }, + "type": "object" + }, ++ "RouterBgpPeerCustomLearnedIpRange": { ++ "id": "RouterBgpPeerCustomLearnedIpRange", ++ "properties": { ++ "range": { ++ "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, a `/32` singular IP address range, and, for IPv6, `/128`.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RouterInterface": { + "id": "RouterInterface", + "properties": { +@@ -63411,6 +66493,22 @@ + "description": "Represents a Nat resource. 
It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", + "id": "RouterNat", + "properties": { ++ "autoNetworkTier": { ++ "description": "The network tier to use when automatically reserving IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, PREMIUM tier will be used.", ++ "enum": [ ++ "FIXED_STANDARD", ++ "PREMIUM", ++ "STANDARD", ++ "STANDARD_OVERRIDES_FIXED_STANDARD" ++ ], ++ "enumDescriptions": [ ++ "Public internet quality with fixed bandwidth.", ++ "High quality, Google-grade network tier, support for all networking products.", ++ "Public internet quality, only limited support for other networking products.", ++ "(Output only) Temporary tier for FIXED_STANDARD when fixed standard tier is expired or not configured." ++ ], ++ "type": "string" ++ }, + "drainNatIps": { + "description": "A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only.", + "items": { +@@ -63491,7 +66589,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -64804,6 +67902,13 @@ + "" + ], + "type": "string" ++ }, ++ "userDefinedFields": { ++ "description": "Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. 
Example: userDefinedFields: - name: \"ipv4_fragment_offset\" base: IPV4 offset: 6 size: 2 mask: \"0x1fff\"", ++ "items": { ++ "$ref": "SecurityPolicyUserDefinedField" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -64846,15 +67951,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -64896,6 +68001,13 @@ + "" + ], + "type": "string" ++ }, ++ "userIpRequestHeaders": { ++ "description": "An optional list of case-insensitive request header names to use for resolving the callers client IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -65073,7 +68185,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -65093,7 +68205,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. 
", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -65118,7 +68230,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -65129,6 +68241,10 @@ + "$ref": "SecurityPolicyRuleMatcher", + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." + }, ++ "networkMatch": { ++ "$ref": "SecurityPolicyRuleNetworkMatcher", ++ "description": "A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. Example: networkMatch: srcIpRanges: - \"192.0.2.0/24\" - \"198.51.100.0/24\" userDefinedFields: - name: \"ipv4_fragment_offset\" values: - \"1-0x1fff\" The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named \"ipv4_fragment_offset\" with a value between 1 and 0x1fff inclusive." ++ }, + "preconfiguredWafConfig": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfig", + "description": "Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect." 
+@@ -65148,7 +68264,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "ruleNumber": { + "description": "Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server.", +@@ -65214,7 +68330,11 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." ++ }, ++ "exprOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptions", ++ "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -65275,6 +68395,117 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleMatcherExprOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptions", ++ "properties": { ++ "recaptchaOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "properties": { ++ "actionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "sessionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleNetworkMatcher": { ++ "description": "Represents a match condition that incoming network traffic is evaluated against.", ++ "id": "SecurityPolicyRuleNetworkMatcher", ++ "properties": { ++ "destIpRanges": { ++ "description": "Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destPorts": { ++ "description": "Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "ipProtocols": { ++ "description": "IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. \"6\"), range (e.g. \"253-254\"), or one of the following protocol names: \"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"ipip\", or \"sctp\".", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcAsns": { ++ "description": "BGP Autonomous System Number associated with the source IP address.", ++ "items": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "type": "array" ++ }, ++ "srcIpRanges": { ++ "description": "Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcPorts": { ++ "description": "Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcRegionCodes": { ++ "description": "Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "userDefinedFields": { ++ "description": "User-defined fields. Each element names a defined field and lists the matching values for that field.", ++ "items": { ++ "$ref": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch": { ++ "id": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch", ++ "properties": { ++ "name": { ++ "description": "Name of the user-defined field, as given in the definition.", ++ "type": "string" ++ }, ++ "values": { ++ "description": "Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with \"0x\") number (e.g. \"64\") or range (e.g. \"0x400-0x7ff\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRulePreconfiguredWafConfig": { + "id": "SecurityPolicyRulePreconfiguredWafConfig", + "properties": { +@@ -65415,12 +68646,12 @@ + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -65503,6 +68734,46 @@ + }, + "type": "object" + }, ++ "SecurityPolicyUserDefinedField": { ++ "id": "SecurityPolicyUserDefinedField", ++ "properties": { ++ "base": { ++ "description": "The base relative to which 'offset' is measured. Possible values are: - IPV4: Points to the beginning of the IPv4 header. - IPV6: Points to the beginning of the IPv6 header. - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. required", ++ "enum": [ ++ "IPV4", ++ "IPV6", ++ "TCP", ++ "UDP" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "mask": { ++ "description": "If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. Encoded as a hexadecimal number (starting with \"0x\"). The last byte of the field (in network byte order) corresponds to the least significant byte of the mask.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "The name of this field. Must be unique within the policy.", ++ "type": "string" ++ }, ++ "offset": { ++ "description": "Offset of the first byte of the field (in network byte order) relative to 'base'.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "size": { ++ "description": "Size of the field in bytes. Valid values: 1-4.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "SecuritySettings": { + "description": "The authentication and authorization settings for a BackendService.", + "id": "SecuritySettings", +@@ -65516,11 +68787,11 @@ + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. 
Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "subjectAltNames": { +- "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -65597,7 +68868,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -65694,6 +68965,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . 
- If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -66404,6 +69679,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -66447,6 +69723,14 @@ + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", + "type": "string" + }, ++ "sourceInstantSnapshot": { ++ "description": "The source instant snapshot used to create this snapshot. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instantSnapshots/instantSnapshot - projects/project/zones/zone/instantSnapshots/instantSnapshot - zones/zone/instantSnapshots/instantSnapshot ", ++ "type": "string" ++ }, ++ "sourceInstantSnapshotId": { ++ "description": "[Output Only] The unique ID of the instant snapshot used to create this snapshot. This value identifies the exact instant snapshot that was used to create this persistent disk. For example, if you created the persistent disk from an instant snapshot that was later deleted and recreated under the same name, the source instant snapshot ID would identify the exact instant snapshot that was used.", ++ "type": "string" ++ }, + "sourceSnapshotSchedulePolicy": { + "description": "[Output Only] URL of the resource policy which created this scheduled snapshot.", + "type": "string" +@@ -67901,7 +71185,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "externalIpv6Prefix": { +@@ -67983,7 +71267,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. 
This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -68009,7 +71293,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -68330,7 +71614,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -68899,6 +72183,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64", +@@ -69286,7 +72575,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -69342,7 +72631,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -69365,6 +72654,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -69407,7 +72701,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -69720,6 +73014,10 @@ + "description": "The URL of the network this target instance uses to forward traffic. 
If not specified, the traffic will be forwarded to the network that the default network interface belongs to.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this target instance.", ++ "type": "string" ++ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" +@@ -70134,6 +73432,10 @@ + "description": "[Output Only] URL of the region where the target pool resides.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this target pool.", ++ "type": "string" ++ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" +@@ -70606,7 +73908,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -70648,7 +73950,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -72404,7 +75706,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -72422,7 +75724,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. 
Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -72839,6 +76141,18 @@ + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, ++ "gatewayIpVersion": { ++ "description": "The IP family of the gateway IPs for the HA-VPN gateway interfaces. If not specified, IPV4 will be used.", ++ "enum": [ ++ "IPV4", ++ "IPV6" ++ ], ++ "enumDescriptions": [ ++ "Every HA-VPN gateway interface is configured with an IPv4 address.", ++ "Every HA-VPN gateway interface is configured with an IPv6 address." ++ ], ++ "type": "string" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -73209,7 +76523,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -73221,7 +76535,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +@@ -73262,6 +76576,10 @@ + "ipAddress": { + "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", + "type": "string" ++ }, ++ "ipv6Address": { ++ "description": "[Output Only] IPv6 address for this VPN interface associated with the VPN gateway. The IPv6 address must be a regional external IPv6 address. The format is RFC 5952 format (e.g. 
2001:db8::2d9:51:0:0).", ++ "type": "string" + } + }, + "type": "object" +diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +index 0737664ae8d..e614cb37286 100644 +--- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:beta" + const apiName = "compute" +@@ -170,8 +171,10 @@ func New(client *http.Client) (*Service, error) { + s.InstanceGroups = NewInstanceGroupsService(s) + s.InstanceTemplates = NewInstanceTemplatesService(s) + s.Instances = NewInstancesService(s) ++ s.InstantSnapshots = NewInstantSnapshotsService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) ++ s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) + s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) + s.Licenses = NewLicensesService(s) +@@ -201,6 +204,7 @@ func New(client *http.Client) (*Service, error) { + s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionInstanceTemplates = NewRegionInstanceTemplatesService(s) + s.RegionInstances = NewRegionInstancesService(s) ++ s.RegionInstantSnapshots = NewRegionInstantSnapshotsService(s) + s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) + s.RegionNetworkFirewallPolicies = NewRegionNetworkFirewallPoliciesService(s) + s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) +@@ -296,10 +300,14 @@ type Service struct { + + Instances *InstancesService + ++ InstantSnapshots *InstantSnapshotsService ++ + InterconnectAttachments *InterconnectAttachmentsService + + InterconnectLocations *InterconnectLocationsService + ++ InterconnectRemoteLocations *InterconnectRemoteLocationsService ++ + Interconnects *InterconnectsService + + LicenseCodes *LicenseCodesService +@@ -358,6 +366,8 @@ type Service struct { + + RegionInstances *RegionInstancesService + ++ RegionInstantSnapshots *RegionInstantSnapshotsService ++ + RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService + + RegionNetworkFirewallPolicies *RegionNetworkFirewallPoliciesService +@@ -670,6 +680,15 @@ type InstancesService struct { + s *Service + } + ++func NewInstantSnapshotsService(s *Service) *InstantSnapshotsService { ++ rs := &InstantSnapshotsService{s: s} ++ return rs ++} ++ ++type InstantSnapshotsService struct { ++ s *Service ++} ++ + func NewInterconnectAttachmentsService(s *Service) *InterconnectAttachmentsService { + rs := &InterconnectAttachmentsService{s: s} + return rs +@@ -688,6 +707,15 @@ type InterconnectLocationsService struct { + s *Service + } + ++func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { ++ rs := &InterconnectRemoteLocationsService{s: s} ++ return rs ++} ++ ++type InterconnectRemoteLocationsService struct { ++ s *Service ++} ++ + func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +@@ -949,6 +977,15 @@ type RegionInstancesService struct { + s *Service + } + ++func NewRegionInstantSnapshotsService(s *Service) *RegionInstantSnapshotsService { ++ rs := &RegionInstantSnapshotsService{s: s} ++ return rs ++} ++ ++type RegionInstantSnapshotsService struct { 
++ s *Service ++} ++ + func NewRegionNetworkEndpointGroupsService(s *Service) *RegionNetworkEndpointGroupsService { + rs := &RegionNetworkEndpointGroupsService{s: s} + return rs +@@ -1978,32 +2015,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. ++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. ++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -2032,6 +2072,10 @@ type AccessConfig struct { + // external IPv6 range. + PublicPtrDomainName string `json:"publicPtrDomainName,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this access config. 
++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SetPublicPtr: Specifies whether a public DNS 'PTR' record should be + // created to map the external IP address of the instance to a DNS + // domain name. This field is not used in ipv6AccessConfig. A default +@@ -2039,12 +2083,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -3242,6 +3287,17 @@ type AttachedDisk struct { + // read-write mode. + Mode string `json:"mode,omitempty"` + ++ // SavedState: For LocalSSD disks on VM Instances in STOPPED or ++ // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data ++ // has been saved to a persistent location by customer request. (see the ++ // discard_local_ssd option on Stop/Suspend). Read-only in the api. ++ // ++ // Possible values: ++ // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not ++ // been preserved. ++ // "PRESERVED" - Disk state has been preserved. ++ SavedState string `json:"savedState,omitempty"` ++ + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` +@@ -3387,6 +3443,13 @@ type AttachedDiskInitializeParams struct { + // disk can handle. Values must be between 1 and 7,124. + ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + ++ // ReplicaZones: Required for each regional disk associated with the ++ // instance. Specify the URLs of the zones where the disk should be ++ // replicated to. You must provide exactly two replica zones, and one ++ // zone must be the same as the instance zone. You can't use this option ++ // with boot disks. ++ ReplicaZones []string `json:"replicaZones,omitempty"` ++ + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values +@@ -4386,15 +4449,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). 
++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4422,7 +4487,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -5760,6 +5830,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -7509,6 +7583,44 @@ func (s *Binding) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// BulkInsertDiskResource: A transient resource used in ++// compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is ++// only used to process requests and is not persisted. ++type BulkInsertDiskResource struct { ++ // SourceConsistencyGroupPolicy: The URL of the ++ // DiskConsistencyGroupPolicy for the group of disks to clone. This may ++ // be a full or partial URL, such as: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /resourcePolicies/resourcePolicy - ++ // projects/project/regions/region/resourcePolicies/resourcePolicy - ++ // regions/region/resourcePolicies/resourcePolicy ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "SourceConsistencyGroupPolicy") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. ++ // "SourceConsistencyGroupPolicy") to include in API requests with the ++ // JSON null value. 
By default, fields with empty values are omitted ++ // from API requests. However, any field with an empty value appearing ++ // in NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BulkInsertDiskResource) MarshalJSON() ([]byte, error) { ++ type NoMethod BulkInsertDiskResource ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // BulkInsertInstanceResource: A transient resource used in + // compute.instances.bulkInsert and compute.regionInstances.bulkInsert . + // This resource is not persisted anywhere, it is used only for +@@ -7589,11 +7701,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { + // properties to be set on individual instances. To be extended in the + // future. + type BulkInsertInstanceResourcePerInstanceProperties struct { ++ // Hostname: Specifies the hostname of the instance. More details in: ++ // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention ++ Hostname string `json:"hostname,omitempty"` ++ + // Name: This field is only temporary. It will be removed. Do not use + // it. + Name string `json:"name,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Name") to ++ // ForceSendFields is a list of field names (e.g. "Hostname") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -7601,8 +7717,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Name") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Hostname") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -7889,7 +8005,7 @@ type Commitment struct { + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -7930,6 +8046,7 @@ type Commitment struct { + // "GENERAL_PURPOSE_N2" + // "GENERAL_PURPOSE_N2D" + // "GENERAL_PURPOSE_T2D" ++ // "GRAPHICS_OPTIMIZED" + // "MEMORY_OPTIMIZED" + // "MEMORY_OPTIMIZED_M3" + // "TYPE_UNSPECIFIED" +@@ -9095,6 +9212,13 @@ type Disk struct { + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + ++ // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. ++ AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: [Output Only] A list of disks this disk is ++ // asynchronously replicated to. 
++ AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -9250,6 +9374,10 @@ type Disk struct { + // automatic snapshot creations. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for the disk ++ // resource. ++ ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` ++ + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + +@@ -9265,6 +9393,16 @@ type Disk struct { + // source. Acceptable values are 1 to 65536, inclusive. + SizeGb int64 `json:"sizeGb,omitempty,string"` + ++ // SourceConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // SourceConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` ++ + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: - +@@ -9311,6 +9449,24 @@ type Disk struct { + // version of the image that was used. + SourceImageId string `json:"sourceImageId,omitempty"` + ++ // SourceInstantSnapshot: The source instant snapshot used to create ++ // this disk. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /instantSnapshots/instantSnapshot - ++ // projects/project/zones/zone/instantSnapshots/instantSnapshot - ++ // zones/zone/instantSnapshots/instantSnapshot ++ SourceInstantSnapshot string `json:"sourceInstantSnapshot,omitempty"` ++ ++ // SourceInstantSnapshotId: [Output Only] The unique ID of the instant ++ // snapshot used to create this disk. This value identifies the exact ++ // instant snapshot that was used to create this persistent disk. For ++ // example, if you created the persistent disk from an instant snapshot ++ // that was later deleted and recreated under the same name, the source ++ // instant snapshot ID would identify the exact version of the instant ++ // snapshot that was used. ++ SourceInstantSnapshotId string `json:"sourceInstantSnapshotId,omitempty"` ++ + // SourceSnapshot: The source snapshot used to create this disk. You can + // provide this as a partial or full URL to the resource. For example, + // the following are valid values: - +@@ -9603,6 +9759,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. 
++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ ++ // Disk: The other disk asynchronously replicated to or from the current ++ // disk. You can provide this as a partial or full URL to the resource. ++ // For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // zones/zone/disks/disk ++ Disk string `json:"disk,omitempty"` ++ ++ // DiskId: [Output Only] The unique ID of the other disk asynchronously ++ // replicated to or from the current disk. This value identifies the ++ // exact disk that was used to create this replication. For example, if ++ // you started replicating the persistent disk from a disk that was ++ // later deleted and recreated under the same name, the disk ID would ++ // identify the exact version of the disk that was used. ++ DiskId string `json:"diskId,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplication ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskAsyncReplicationList struct { ++ AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AsyncReplicationDisk") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplicationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskInstantiationConfig: A specification of the desired way to + // instantiate a disk in the instance template when its created from a + // source instance. +@@ -9944,6 +10180,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskResourceStatus struct { ++ AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message ++ AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskResourceStatusAsyncReplicationStatus struct { ++ // Possible values: ++ // "ACTIVE" - Replication is active. ++ // "CREATED" - Secondary disk is created and is waiting for ++ // replication to start. ++ // "STARTING" - Replication is starting. ++ // "STATE_UNSPECIFIED" ++ // "STOPPED" - Replication is stopped. ++ // "STOPPING" - Replication is stopping. ++ State string `json:"state,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "State") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "State") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatusAsyncReplicationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskType: Represents a Disk Type resource. Google Compute Engine has + // two Disk Type resources: * Regional + // (/compute/docs/reference/rest/beta/regionDiskTypes) * Zonal +@@ -10828,6 +11128,79 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DisksStartAsyncReplicationRequest struct { ++ // AsyncSecondaryDisk: The secondary disk to start asynchronous ++ // replication to. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod DisksStartAsyncReplicationRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// DisksStopGroupAsyncReplicationResource: A transient resource used in ++// compute.disks.stopGroupAsyncReplication and ++// compute.regionDisks.stopGroupAsyncReplication. It is only used to ++// process requests and is not persisted. ++type DisksStopGroupAsyncReplicationResource struct { ++ // ResourcePolicy: The URL of the DiskConsistencyGroupPolicy for the ++ // group of disks to stop. This may be a full or partial URL, such as: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /resourcePolicies/resourcePolicy - ++ // projects/project/regions/region/resourcePolicies/resourcePolicy - ++ // regions/region/resourcePolicies/resourcePolicy ++ ResourcePolicy string `json:"resourcePolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ResourcePolicy") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ResourcePolicy") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DisksStopGroupAsyncReplicationResource) MarshalJSON() ([]byte, error) { ++ type NoMethod DisksStopGroupAsyncReplicationResource ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DisplayDevice: A set of Display Device options + type DisplayDevice struct { + // EnableDisplay: Defines whether the instance has Display enabled. +@@ -11469,6 +11842,15 @@ type ExternalVpnGatewayInterface struct { + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: IPv6 address of the interface in the external VPN ++ // gateway. This IPv6 address can be either from your on-premise gateway ++ // or another Cloud provider's VPN gateway, it cannot be an IP address ++ // from Google Compute Engine. Must specify an IPv6 address (not ++ // IPV4-mapped) using any format described in RFC 4291 (e.g. ++ // 2001:db8:0:0:2d9:51:0:0). The output format is RFC 5952 format (e.g. ++ // 2001:db8::2d9:51:0:0). ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -12634,6 +13016,13 @@ type FirewallPolicyRule struct { + // single firewall policy rule. + RuleTupleCount int64 `json:"ruleTupleCount,omitempty"` + ++ // SecurityProfileGroup: A fully-qualified URL of a SecurityProfile ++ // resource instance. Example: ++ // https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group ++ // Must be specified if action = 'apply_security_profile_group' and ++ // cannot be specified for other actions. ++ SecurityProfileGroup string `json:"securityProfileGroup,omitempty"` ++ + // TargetResources: A list of network resource URLs to which this rule + // applies. This field allows you to control which network's VMs get + // this rule. If this field is left blank, all VMs within the +@@ -12656,6 +13045,11 @@ type FirewallPolicyRule struct { + // of instances that are applied with this rule. + TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` + ++ // TlsInspect: Boolean flag indicating if the traffic should be TLS ++ // decrypted. Can be set only if action = 'apply_security_profile_group' ++ // and cannot be set for other actions. ++ TlsInspect bool `json:"tlsInspect,omitempty"` ++ + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +@@ -13077,9 +13471,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. 
For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -13937,6 +14332,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type GlobalAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project ++ // /global/addresses/address - projects/project/global/addresses/address ++ // Note that destination project must be different from the source ++ // project. So /global/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod GlobalAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +@@ -14226,8 +14658,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. 
+ // + // Possible values: + // "FEATURE_TYPE_UNSPECIFIED" +@@ -14235,6 +14667,7 @@ type GuestOsFeature struct { + // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "SEV_CAPABLE" ++ // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" +@@ -14552,12 +14985,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/beta/regionHealthChecks) Internal + // HTTP(S) load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -15655,7 +16088,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -15740,12 +16173,22 @@ type HealthStatusForNetworkEndpoint struct { + // the health checks configured. + // + // Possible values: +- // "DRAINING" +- // "HEALTHY" +- // "UNHEALTHY" +- // "UNKNOWN" ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. + HealthState string `json:"healthState,omitempty"` + ++ // Ipv6HealthState: Health state of the ipv6 network endpoint determined ++ // based on the health checks configured. ++ // ++ // Possible values: ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. ++ Ipv6HealthState string `json:"ipv6HealthState,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -18136,9 +18579,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -19447,13 +19890,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. 
+ HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to +@@ -20213,7 +20657,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -22902,9 +23348,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -23475,6 +23921,42 @@ func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesSetSecurityPolicyRequest struct { ++ // NetworkInterfaces: The network interfaces that the security policy ++ // will be applied to. Network interfaces use the nicN naming format. ++ // You can only set a security policy for network interfaces with an ++ // access config. ++ NetworkInterfaces []string `json:"networkInterfaces,omitempty"` ++ ++ // SecurityPolicy: A full or partial URL to a security policy to add to ++ // this instance. If this field is set to an empty string it will remove ++ // the associated security policy. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NetworkInterfaces") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NetworkInterfaces") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesSetSecurityPolicyRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesSetServiceAccountRequest struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` +@@ -23537,6 +24019,801 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InstantSnapshot: Represents a InstantSnapshot resource. You can use ++// instant snapshots to create disk rollback points quickly.. ++type InstantSnapshot struct { ++ // Architecture: [Output Only] The architecture of the instant snapshot. ++ // Valid values are ARM64 or X86_64. ++ // ++ // Possible values: ++ // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture ++ // is not set. ++ // "ARM64" - Machines with architecture ARM64 ++ // "X86_64" - Machines with architecture X86_64 ++ Architecture string `json:"architecture,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: An optional description of this resource. Provide this ++ // property when you create the resource. ++ Description string `json:"description,omitempty"` ++ ++ // DiskSizeGb: [Output Only] Size of the source disk, specified in GB. ++ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#instantSnapshot for InstantSnapshot resources. ++ Kind string `json:"kind,omitempty"` ++ ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // InstantSnapshot, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve a InstantSnapshot. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels to apply to this InstantSnapshot. These can be later ++ // modified by the setLabels method. Label values may be empty. 
++ Labels map[string]string `json:"labels,omitempty"` ++ ++ // Name: Name of the resource; provided by the client when the resource ++ // is created. The name must be 1-63 characters long, and comply with ++ // RFC1035. Specifically, the name must be 1-63 characters long and ++ // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means ++ // the first character must be a lowercase letter, and all following ++ // characters must be a dash, lowercase letter, or digit, except the ++ // last character, which cannot be a dash. ++ Name string `json:"name,omitempty"` ++ ++ // Region: [Output Only] URL of the region where the instant snapshot ++ // resides. You must specify this field as part of the HTTP request URL. ++ // It is not settable as a field in the request body. ++ Region string `json:"region,omitempty"` ++ ++ // ResourceStatus: [Output Only] Status information for the instant ++ // snapshot resource. ++ ResourceStatus *InstantSnapshotResourceStatus `json:"resourceStatus,omitempty"` ++ ++ // SatisfiesPzs: [Output Only] Reserved for future use. ++ SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource's ++ // resource id. ++ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` ++ ++ // SourceDisk: URL of the source disk used to create this instant ++ // snapshot. Note that the source disk must be in the same zone/region ++ // as the instant snapshot to be created. This can be a full or valid ++ // partial URL. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ SourceDisk string `json:"sourceDisk,omitempty"` ++ ++ // SourceDiskId: [Output Only] The ID value of the disk used to create ++ // this InstantSnapshot. This value may be used to determine whether the ++ // InstantSnapshot was taken from the current or a previous instance of ++ // a given disk name. ++ SourceDiskId string `json:"sourceDiskId,omitempty"` ++ ++ // Status: [Output Only] The status of the instantSnapshot. This can be ++ // CREATING, DELETING, FAILED, or READY. ++ // ++ // Possible values: ++ // "CREATING" - InstantSnapshot creation is in progress. ++ // "DELETING" - InstantSnapshot is currently being deleted. ++ // "FAILED" - InstantSnapshot creation failed. ++ // "READY" - InstantSnapshot has been created successfully. ++ Status string `json:"status,omitempty"` ++ ++ // Zone: [Output Only] URL of the zone where the instant snapshot ++ // resides. You must specify this field as part of the HTTP request URL. ++ // It is not settable as a field in the request body. ++ Zone string `json:"zone,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Architecture") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Architecture") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshot) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshot ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotAggregatedList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstantSnapshotsScopedList resources. ++ Items map[string]InstantSnapshotsScopedList `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#instantSnapshotAggregatedList for aggregated lists of ++ // instantSnapshots. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InstantSnapshotAggregatedListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotAggregatedListWarning: [Output Only] Informational ++// warning message. ++type InstantSnapshotAggregatedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. 
++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotAggregatedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotAggregatedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. 
++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotExportParams struct { ++ // BaseInstantSnapshot: An optional base instant snapshot that this ++ // resource is compared against. If not specified, all blocks of this ++ // resource are exported. The base instant snapshot and this resource ++ // must be created from the same disk. The base instant snapshot must be ++ // created earlier in time than this resource. ++ BaseInstantSnapshot string `json:"baseInstantSnapshot,omitempty"` ++ ++ // BucketName: The name of an existing bucket in Cloud Storage where the ++ // changed blocks will be stored. The Google Service Account must have ++ // read and write access to this bucket. The bucket has to be in the ++ // same region as this resource. ++ BucketName string `json:"bucketName,omitempty"` ++ ++ // EncryptionKey: Encryption key used to encrypt the instant snapshot. ++ EncryptionKey *CustomerEncryptionKey `json:"encryptionKey,omitempty"` ++ ++ // ObjectName: Name of the output Bigstore object storing the changed ++ // blocks. Object name must be less than 1024 bytes in length. ++ ObjectName string `json:"objectName,omitempty"` ++ ++ // OutputType: The format of the output file. ++ // ++ // Possible values: ++ // "INVALID" ++ // "METADATA_AND_DATA" ++ // "METADATA_ONLY" ++ OutputType string `json:"outputType,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BaseInstantSnapshot") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BaseInstantSnapshot") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotExportParams) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotExportParams ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotList: Contains a list of InstantSnapshot resources. ++type InstantSnapshotList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstantSnapshot resources. ++ Items []*InstantSnapshot `json:"items,omitempty"` ++ ++ // Kind: Type of resource. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. 
Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InstantSnapshotListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotListWarning: [Output Only] Informational warning ++// message. ++type InstantSnapshotListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. 
++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotResourceStatus struct { ++ // StorageSizeBytes: [Output Only] The storage size of this instant ++ // snapshot. ++ StorageSizeBytes int64 `json:"storageSizeBytes,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. "StorageSizeBytes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "StorageSizeBytes") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsExportRequest struct { ++ // ExportParams: Parameters to export the changed blocks. ++ ExportParams *InstantSnapshotExportParams `json:"exportParams,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ExportParams") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ExportParams") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsExportRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsExportRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsScopedList struct { ++ // InstantSnapshots: [Output Only] A list of instantSnapshots contained ++ // in this scope. ++ InstantSnapshots []*InstantSnapshot `json:"instantSnapshots,omitempty"` ++ ++ // Warning: [Output Only] Informational warning which replaces the list ++ // of instantSnapshots when the list is empty. ++ Warning *InstantSnapshotsScopedListWarning `json:"warning,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "InstantSnapshots") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InstantSnapshots") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotsScopedListWarning: [Output Only] Informational ++// warning which replaces the list of instantSnapshots when the list is ++// empty. ++type InstantSnapshotsScopedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. 
++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotsScopedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. 
++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsScopedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // Int64RangeMatch: HttpRouteRuleMatch criteria for field values that + // must stay within the specified integer range. + type Int64RangeMatch struct { +@@ -23572,9 +24849,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. 
+@@ -23714,6 +24991,11 @@ type Interconnect struct { + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + ++ // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. ++ // This field specifies the location outside of Google's network that ++ // the interconnect is connected to. ++ RemoteLocation string `json:"remoteLocation,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -23829,6 +25111,11 @@ type InterconnectAttachment struct { + // CloudRouterIpv6InterfaceId: This field is not available. + CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + ++ // ConfigurationConstraints: [Output Only] Constraints for this ++ // attachment, if any. The attachment does not work if these constraints ++ // are not met. ++ ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -23921,8 +25208,7 @@ type InterconnectAttachment struct { + // attachment. If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always +@@ -23999,6 +25285,14 @@ type InterconnectAttachment struct { + // body. + Region string `json:"region,omitempty"` + ++ // RemoteService: [Output Only] If the attachment is on a Cross-Cloud ++ // Interconnect connection, this field contains the interconnect's ++ // remote location service provider. Example values: "Amazon Web ++ // Services" "Microsoft Azure". The field is set only for attachments on ++ // Cross-Cloud Interconnect connections. Its value is copied from the ++ // InterconnectRemoteLocation remoteService field. ++ RemoteService string `json:"remoteService,omitempty"` ++ + // Router: URL of the Cloud Router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to +@@ -24062,6 +25356,16 @@ type InterconnectAttachment struct { + // yet, because turnup is not complete. + State string `json:"state,omitempty"` + ++ // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 ++ // (default) - 30 The default value is 29, except for Cross-Cloud ++ // Interconnect connections that use an InterconnectRemoteLocation with ++ // a constraints.subnetLengthRange.min equal to 30. For example, ++ // connections that use an Azure remote location fall into this ++ // category. In these cases, the default value is 30, and requesting 29 ++ // returns an error. Where both 29 and 30 are allowed, 29 is preferred, ++ // because it gives Google Cloud Support more debugging visibility. ++ SubnetLength int64 `json:"subnetLength,omitempty"` ++ + // Type: The type of interconnect attachment this is, which can take one + // of the following values: - DEDICATED: an attachment to a Dedicated + // Interconnect. 
- PARTNER: an attachment to a Partner Interconnect, +@@ -24300,6 +25604,87 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InterconnectAttachmentConfigurationConstraints struct { ++ // BgpMd5: [Output Only] Whether the attachment's BGP session ++ // requires/allows/disallows BGP MD5 authentication. This can take one ++ // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. ++ // For example, a Cross-Cloud Interconnect connection to a remote cloud ++ // provider that requires BGP MD5 authentication has the ++ // interconnectRemoteLocation ++ // attachment_configuration_constraints.bgp_md5 field set to ++ // MD5_REQUIRED, and that property is propagated to the attachment. ++ // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 ++ // is requested. ++ // ++ // Possible values: ++ // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported ++ // and can optionally be configured. ++ // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be ++ // configured. ++ // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must ++ // not be configured ++ BgpMd5 string `json:"bgpMd5,omitempty"` ++ ++ // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote ++ // location is known to support. Formatted as an array of inclusive ++ // ranges {min: min-value, max: max-value}. For example, [{min: 123, ++ // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or ++ // anything in the range 64512-65534. This field is only advisory. ++ // Although the API accepts other ranges, these are the ranges that we ++ // recommend. ++ BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BgpMd5") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BgpMd5") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectAttachmentList: Response to the list request, and + // contains a list of interconnect attachments. + type InterconnectAttachmentList struct { +@@ -25688,6 +27073,468 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect ++// Remote Location resource. You can use this resource to find remote ++// location details about an Interconnect attachment (VLAN). ++type InterconnectRemoteLocation struct { ++ // Address: [Output Only] The postal address of the Point of Presence, ++ // each line in the address is separated by a newline character. ++ Address string `json:"address,omitempty"` ++ ++ // AttachmentConfigurationConstraints: [Output Only] Subset of fields ++ // from InterconnectAttachment's |configurationConstraints| field that ++ // apply to all attachments for this remote location. ++ AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` ++ ++ // City: [Output Only] Metropolitan area designator that indicates which ++ // city an interconnect is located. For example: "Chicago, IL", ++ // "Amsterdam, Netherlands". ++ City string `json:"city,omitempty"` ++ ++ // Constraints: [Output Only] Constraints on the parameters for creating ++ // Cross-Cloud Interconnect and associated InterconnectAttachments. ++ Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` ++ ++ // Continent: [Output Only] Continent for this location, which can take ++ // one of the following values: - AFRICA - ASIA_PAC - EUROPE - ++ // NORTH_AMERICA - SOUTH_AMERICA ++ // ++ // Possible values: ++ // "AFRICA" ++ // "ASIA_PAC" ++ // "EUROPE" ++ // "NORTH_AMERICA" ++ // "SOUTH_AMERICA" ++ Continent string `json:"continent,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: [Output Only] An optional description of the resource. ++ Description string `json:"description,omitempty"` ++ ++ // FacilityProvider: [Output Only] The name of the provider for this ++ // facility (e.g., EQUINIX). ++ FacilityProvider string `json:"facilityProvider,omitempty"` ++ ++ // FacilityProviderFacilityId: [Output Only] A provider-assigned ++ // Identifier for this facility (e.g., Ashburn-DC1). 
++ FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#interconnectRemoteLocation for interconnect remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) ++ // constraints, which can take one of the following values: ++ // LACP_SUPPORTED, LACP_UNSUPPORTED ++ // ++ // Possible values: ++ // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled ++ // by default on the Cross-Cloud Interconnect. ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows ++ // bundleAggregationType as "static". GCP does not support LAGs without ++ // LACP, so requestedLinkCount must be 1. ++ Lacp string `json:"lacp,omitempty"` ++ ++ // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 100 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. ++ MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` ++ ++ // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 10 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. ++ MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` ++ ++ // Name: [Output Only] Name of the resource. ++ Name string `json:"name,omitempty"` ++ ++ // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this ++ // facility (corresponding with a netfac type in peeringdb). ++ PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` ++ ++ // PermittedConnections: [Output Only] Permitted connections. ++ PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` ++ ++ // RemoteService: [Output Only] Indicates the service provider present ++ // at the remote location. Example values: "Amazon Web Services", ++ // "Microsoft Azure". ++ RemoteService string `json:"remoteService,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Status: [Output Only] The status of this InterconnectRemoteLocation, ++ // which can take one of the following values: - CLOSED: The ++ // InterconnectRemoteLocation is closed and is unavailable for ++ // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The ++ // InterconnectRemoteLocation is available for provisioning new ++ // Cross-Cloud Interconnects. ++ // ++ // Possible values: ++ // "AVAILABLE" - The InterconnectRemoteLocation is available for ++ // provisioning new Cross-Cloud Interconnects. ++ // "CLOSED" - The InterconnectRemoteLocation is closed for ++ // provisioning new Cross-Cloud Interconnects. ++ Status string `json:"status,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Address") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Address") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraints struct { ++ // PortPairRemoteLocation: [Output Only] Port pair remote location ++ // constraints, which can take one of the following values: ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to ++ // individual ports, but the UI uses this field when ordering a pair of ++ // ports, to prevent users from accidentally ordering something that is ++ // incompatible with their cloud provider. Specifically, when ordering a ++ // redundant pair of Cross-Cloud Interconnect ports, and one of them ++ // uses a remote location with portPairMatchingRemoteLocation set to ++ // matching, the UI requires that both ports use the same remote ++ // location. ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider ++ // allocates ports in pairs, and the user should choose the same remote ++ // location for both ports. ++ // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision ++ // a redundant pair of Cross-Cloud Interconnects using two different ++ // remote locations in the same city. ++ PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` ++ ++ // PortPairVlan: [Output Only] Port pair VLAN constraints, which can ++ // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, ++ // PORT_PAIR_MATCHING_VLAN ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the ++ // Interconnect for this attachment is part of a pair of ports that ++ // should have matching VLAN allocations. This occurs with Cross-Cloud ++ // Interconnect to Azure remote locations. While GCP's API does not ++ // explicitly group pairs of ports, the UI uses this field to ensure ++ // matching VLAN ids when configuring a redundant VLAN pair. ++ // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means ++ // there is no constraint. ++ PortPairVlan string `json:"portPairVlan,omitempty"` ++ ++ // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum ++ // and maximum value (inclusive) for the IPv4 subnet length. For ++ // example, an interconnectRemoteLocation for Azure has {min: 30, max: ++ // 30} because Azure requires /30 subnets. This range specifies the ++ // values supported by both cloud providers. Interconnect currently ++ // supports /29 and /30 IPv4 subnet lengths. 
If a remote cloud has no ++ // constraint on IPv4 subnet length, the range would thus be {min: 29, ++ // max: 30}. ++ SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "PortPairRemoteLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PortPairRemoteLocation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationList: Response to the list request, and ++// contains a list of interconnect remote locations. ++type InterconnectRemoteLocationList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InterconnectRemoteLocation resources. ++ Items []*InterconnectRemoteLocation `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#interconnectRemoteLocationList for lists of interconnect ++ // remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. 
If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectsGetDiagnosticsResponse: Response for the + // InterconnectsGetDiagnosticsRequest. 
+ type InterconnectsGetDiagnosticsResponse struct { +@@ -26930,7 +28777,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -27589,9 +29436,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -28130,7 +29977,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -28242,10 +30089,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -28265,7 +30111,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -28516,7 +30366,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. + type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -28524,7 +30374,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. 
+ ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -29396,6 +31246,9 @@ type NetworkEndpoint struct { + // the network endpoint group belongs to will be used. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: Optional IPv6 address of network endpoint. ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // Port: Optional port number of network endpoint. If not specified, the + // defaultPort for the network endpoint group will be used. + Port int64 `json:"port,omitempty"` +@@ -30850,10 +32703,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -38186,6 +40040,7 @@ type Quota struct { + // "COMMITTED_NVIDIA_A100_80GB_GPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" ++ // "COMMITTED_NVIDIA_L4_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" +@@ -38237,11 +40092,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -38256,6 +40115,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -38280,6 +40140,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -38519,6 +40380,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. 
For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /addresses/address - ++ // projects/project/regions/region/addresses/address Note that ++ // destination project must be different from the source project. So ++ // /regions/region/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionAutoscalerList: Contains a list of autoscalers. + type RegionAutoscalerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -39009,238 +40908,52 @@ func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +-type RegionInstanceGroupList struct { +- // Id: [Output Only] Unique identifier for the resource; defined by the +- // server. +- Id string `json:"id,omitempty"` +- +- // Items: A list of InstanceGroup resources. +- Items []*InstanceGroup `json:"items,omitempty"` +- +- // Kind: The resource type. +- Kind string `json:"kind,omitempty"` +- +- // NextPageToken: [Output Only] This token allows you to get the next +- // page of results for list requests. If the number of results is larger +- // than maxResults, use the nextPageToken as a value for the query +- // parameter pageToken in the next list request. Subsequent list +- // requests will have their own nextPageToken to continue paging through +- // the results. +- NextPageToken string `json:"nextPageToken,omitempty"` +- +- // SelfLink: [Output Only] Server-defined URL for this resource. +- SelfLink string `json:"selfLink,omitempty"` +- +- // Warning: [Output Only] Informational warning message. +- Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Id") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. 
+- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Id") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupList +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// RegionInstanceGroupListWarning: [Output Only] Informational warning +-// message. +-type RegionInstanceGroupListWarning struct { +- // Code: [Output Only] A warning code, if applicable. For example, +- // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in +- // the response. +- // +- // Possible values: +- // "CLEANUP_FAILED" - Warning about failed cleanup of transient +- // changes made by a failed operation. +- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was +- // created. +- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as deprecated +- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk +- // that is larger than image size. +- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as experimental +- // "EXTERNAL_API_WARNING" - Warning that is present in an external api +- // call +- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been +- // overridden. Deprecated unused field. +- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an +- // injected kernel, which is deprecated. +- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV +- // backend service is associated with a health check that is not of type +- // HTTP/HTTPS/HTTP2. +- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a +- // exceedingly large number of resources +- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type +- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is +- // not assigned to an instance on the network. +- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot +- // ip forward. +- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's +- // nextHopInstance URL refers to an instance that does not have an ipv6 +- // interface on the same network as the route. +- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL +- // refers to an instance that does not exist. +- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance +- // URL refers to an instance that is not on the same network as the +- // route. +- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not +- // have a status of RUNNING. +- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to +- // continue the process despite the mentioned error. +- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list +- // page. +- // "PARTIAL_SUCCESS" - Success is reported, but some results may be +- // missing due to errors +- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource +- // that requires a TOS they have not accepted. 
+- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a +- // resource is in use. +- // "RESOURCE_NOT_DELETED" - One or more of the resources set to +- // auto-delete could not be deleted because they were in use. +- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is +- // ignored. +- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in +- // instance group manager is valid as such, but its application does not +- // make a lot of sense, because it allows only single instance in +- // instance group. +- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema +- // are present +- // "UNREACHABLE" - A given scope cannot be reached. +- Code string `json:"code,omitempty"` +- +- // Data: [Output Only] Metadata about this warning in key: value format. +- // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" +- // } +- Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` +- +- // Message: [Output Only] A human-readable description of the warning +- // code. +- Message string `json:"message,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Code") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Code") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupListWarning +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type RegionInstanceGroupListWarningData struct { +- // Key: [Output Only] A key that provides more detail on the warning +- // being returned. For example, for warnings where there are no results +- // in a list request for a particular zone, this key might be scope and +- // the key value might be the zone name. Other examples might be a key +- // indicating a deprecated resource and a suggested replacement, or a +- // warning about invalid network settings (for example, if an instance +- // attempts to perform IP forwarding but is not enabled for IP +- // forwarding). +- Key string `json:"key,omitempty"` +- +- // Value: [Output Only] A warning data value corresponding to the key. +- Value string `json:"value,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Key") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. 
"Key") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupListWarningData +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// RegionInstanceGroupManagerDeleteInstanceConfigReq: +-// RegionInstanceGroupManagers.deletePerInstanceConfigs +-type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { +- // Names: The list of instance names for which we want to delete +- // per-instance configs on this managed instance group. +- Names []string `json:"names,omitempty"` ++type RegionDisksStartAsyncReplicationRequest struct { ++ // AsyncSecondaryDisk: The secondary disk to start asynchronous ++ // replication to. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Names") to +- // unconditionally include in API requests. By default, fields with ++ // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") ++ // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Names") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
+ NullFields []string `json:"-"` + } + +-func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq ++func (s *RegionDisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionDisksStartAsyncReplicationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// RegionInstanceGroupManagerList: Contains a list of managed instance +-// groups. +-type RegionInstanceGroupManagerList struct { ++// RegionInstanceGroupList: Contains a list of InstanceGroup resources. ++type RegionInstanceGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + +- // Items: A list of InstanceGroupManager resources. +- Items []*InstanceGroupManager `json:"items,omitempty"` ++ // Items: A list of InstanceGroup resources. ++ Items []*InstanceGroup `json:"items,omitempty"` + +- // Kind: [Output Only] The resource type, which is always +- // compute#instanceGroupManagerList for a list of managed instance +- // groups that exist in th regional scope. ++ // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next +@@ -39255,7 +40968,229 @@ type RegionInstanceGroupManagerList struct { + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` ++ Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupListWarning: [Output Only] Informational warning ++// message. ++type RegionInstanceGroupListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. 
++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type RegionInstanceGroupListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupManagerDeleteInstanceConfigReq: ++// RegionInstanceGroupManagers.deletePerInstanceConfigs ++type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { ++ // Names: The list of instance names for which we want to delete ++ // per-instance configs on this managed instance group. ++ Names []string `json:"names,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Names") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Names") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupManagerList: Contains a list of managed instance ++// groups. ++type RegionInstanceGroupManagerList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstanceGroupManager resources. ++ Items []*InstanceGroupManager `json:"items,omitempty"` ++ ++ // Kind: [Output Only] The resource type, which is always ++ // compute#instanceGroupManagerList for a list of managed instance ++ // groups that exist in th regional scope. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. +@@ -40337,6 +42272,33 @@ func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionInstantSnapshotsExportRequest struct { ++ // ExportParams: Parameters to export the changed blocks. ++ ExportParams *InstantSnapshotExportParams `json:"exportParams,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ExportParams") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ExportParams") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. 
It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstantSnapshotsExportRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstantSnapshotsExportRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionList: Contains a list of region resources. + type RegionList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -41767,6 +43729,10 @@ type ResourcePolicy struct { + + Description string `json:"description,omitempty"` + ++ // DiskConsistencyGroupPolicy: Resource policy for disk consistency ++ // groups. ++ DiskConsistencyGroupPolicy *ResourcePolicyDiskConsistencyGroupPolicy `json:"diskConsistencyGroupPolicy,omitempty"` ++ + // GroupPlacementPolicy: Resource policy for instances for placement + // configuration. + GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` +@@ -42078,6 +44044,11 @@ func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// ResourcePolicyDiskConsistencyGroupPolicy: Resource policy for disk ++// consistency groups. ++type ResourcePolicyDiskConsistencyGroupPolicy struct { ++} ++ + // ResourcePolicyGroupPlacementPolicy: A GroupPlacementPolicy specifies + // resource placement configuration. It specifies the failure bucket + // separation as well as network locality +@@ -43775,6 +45746,17 @@ type RouterBgpPeer struct { + // Bfd: BFD configuration for the BGP peering. + Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` + ++ // CustomLearnedIpRanges: A list of user-defined custom learned route IP ++ // address ranges for a BGP session. ++ CustomLearnedIpRanges []*RouterBgpPeerCustomLearnedIpRange `json:"customLearnedIpRanges,omitempty"` ++ ++ // CustomLearnedRoutePriority: The user-defined custom learned route ++ // priority for a BGP session. This value is applied to all custom ++ // learned route ranges for the session. You can choose a value from `0` ++ // to `65335`. If you don't provide a value, Google Cloud assigns a ++ // priority of `100` to the ranges. ++ CustomLearnedRoutePriority int64 `json:"customLearnedRoutePriority,omitempty"` ++ + // Enable: The status of the BGP peer connection. If set to FALSE, any + // active session with the peer is terminated and all associated routing + // information is removed. If set to TRUE, the peer connection can be +@@ -43935,6 +45917,36 @@ func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RouterBgpPeerCustomLearnedIpRange struct { ++ // Range: The custom learned route IP address range. Must be a valid ++ // CIDR-formatted prefix. If an IP address is provided without a subnet ++ // mask, it is interpreted as, for IPv4, a `/32` singular IP address ++ // range, and, for IPv6, `/128`. ++ Range string `json:"range,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Range") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. 
++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Range") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RouterBgpPeerCustomLearnedIpRange) MarshalJSON() ([]byte, error) { ++ type NoMethod RouterBgpPeerCustomLearnedIpRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP address space. The value must be a +@@ -44264,6 +46276,21 @@ func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { + // that would be used for NAT. GCP would auto-allocate ephemeral IPs if + // no external IPs are provided. + type RouterNat struct { ++ // AutoNetworkTier: The network tier to use when automatically reserving ++ // IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, ++ // PREMIUM tier will be used. ++ // ++ // Possible values: ++ // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. ++ // "PREMIUM" - High quality, Google-grade network tier, support for ++ // all networking products. ++ // "STANDARD" - Public internet quality, only limited support for ++ // other networking products. ++ // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier ++ // for FIXED_STANDARD when fixed standard tier is expired or not ++ // configured. ++ AutoNetworkTier string `json:"autoNetworkTier,omitempty"` ++ + // DrainNatIps: A list of URLs of the IP resources to be drained. These + // IPs must be valid static external IPs that have been assigned to the + // NAT. These IPs should be used for updating/patching a NAT only. +@@ -44347,10 +46374,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -44382,7 +46408,7 @@ type RouterNat struct { + // to 30s if not set. + UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DrainNatIps") to ++ // ForceSendFields is a list of field names (e.g. "AutoNetworkTier") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -44390,12 +46416,13 @@ type RouterNat struct { + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DrainNatIps") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "AutoNetworkTier") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. + NullFields []string `json:"-"` + } + +@@ -46152,6 +48179,15 @@ type SecurityPolicy struct { + // "FIREWALL" + Type string `json:"type,omitempty"` + ++ // UserDefinedFields: Definitions of user-defined fields for ++ // CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to ++ // 4 bytes extracted from a fixed offset in the packet, relative to the ++ // IPv4, IPv6, TCP, or UDP header, with an optional mask to select ++ // certain bits. Rules may then specify matching values for these ++ // fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" ++ // base: IPV4 offset: 6 size: 2 mask: "0x1fff" ++ UserDefinedFields []*SecurityPolicyUserDefinedField `json:"userDefinedFields,omitempty"` ++ + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +@@ -46268,13 +48304,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig) UnmarshalJSON(d + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -46319,6 +48359,10 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // "VERBOSE" + LogLevel string `json:"logLevel,omitempty"` + ++ // UserIpRequestHeaders: An optional list of case-insensitive request ++ // header names to use for resolving the callers client IP address. ++ UserIpRequestHeaders []string `json:"userIpRequestHeaders,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -46640,7 +48684,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. 
The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to +@@ -46708,10 +48753,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -46735,7 +48781,8 @@ type SecurityPolicyRule struct { + EnableLogging bool `json:"enableLogging,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -46746,6 +48793,31 @@ type SecurityPolicyRule struct { + // If it evaluates to true, the corresponding 'action' is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + ++ // NetworkMatch: A match condition that incoming packets are evaluated ++ // against for CLOUD_ARMOR_NETWORK security policies. If it matches, the ++ // corresponding 'action' is enforced. The match criteria for a rule ++ // consists of built-in match fields (like 'srcIpRanges') and ++ // potentially multiple user-defined match fields ('userDefinedFields'). ++ // Field values may be extracted directly from the packet or derived ++ // from it (e.g. 'srcRegionCodes'). Some fields may not be present in ++ // every packet (e.g. 'srcPorts'). A user-defined field is only present ++ // if the base header is found in the packet and the entire field is in ++ // bounds. Each match field may specify which values can match it, ++ // listing one or more ranges, prefixes, or exact values that are ++ // considered a match for the field. A field value must be present in ++ // order to match a specified match field. If no match values are ++ // specified for a match field, then any field value is considered to ++ // match it, and it's not required to be present. For strings specifying ++ // '*' is also equivalent to match all. For a packet to match a rule, ++ // all specified match fields must match the corresponding field values ++ // derived from the packet. 
Example: networkMatch: srcIpRanges: - ++ // "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: ++ // "ipv4_fragment_offset" values: - "1-0x1fff" The above match condition ++ // matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 ++ // and a user-defined field named "ipv4_fragment_offset" with a value ++ // between 1 and 0x1fff inclusive. ++ NetworkMatch *SecurityPolicyRuleNetworkMatcher `json:"networkMatch,omitempty"` ++ + // PreconfiguredWafConfig: Preconfigured WAF configuration to be applied + // for the rule. If the rule does not evaluate preconfigured WAF rules, + // i.e., if evaluatePreconfiguredWaf() is not used, this field will have +@@ -46766,7 +48838,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // RuleNumber: Identifier for the rule. This is only unique within the +@@ -46886,9 +48959,19 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + ++ // ExprOptions: The configuration options available when specifying a ++ // user defined CEVAL expression (i.e., 'expr'). ++ ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` ++ + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must +@@ -46998,6 +49081,166 @@ func (s *SecurityPolicyRuleMatcherConfigLayer4Config) MarshalJSON() ([]byte, err + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleMatcherExprOptions struct { ++ // RecaptchaOptions: reCAPTCHA configuration options to be applied for ++ // the rule. If the rule does not evaluate reCAPTCHA tokens, this field ++ // will have no effect. ++ RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "RecaptchaOptions") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. 
However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { ++ // ActionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA action-tokens. The provided site keys need to ++ // be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` ++ ++ // SessionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA session-tokens. The provided site keys need ++ // to be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// SecurityPolicyRuleNetworkMatcher: Represents a match condition that ++// incoming network traffic is evaluated against. ++type SecurityPolicyRuleNetworkMatcher struct { ++ // DestIpRanges: Destination IPv4/IPv6 addresses or CIDR prefixes, in ++ // standard text format. ++ DestIpRanges []string `json:"destIpRanges,omitempty"` ++ ++ // DestPorts: Destination port numbers for TCP/UDP/SCTP. Each element ++ // can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. ++ // "0-1023"). ++ DestPorts []string `json:"destPorts,omitempty"` ++ ++ // IpProtocols: IPv4 protocol / IPv6 next header (after extension ++ // headers). Each element can be an 8-bit unsigned decimal number (e.g. ++ // "6"), range (e.g. "253-254"), or one of the following protocol names: ++ // "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". ++ IpProtocols []string `json:"ipProtocols,omitempty"` ++ ++ // SrcAsns: BGP Autonomous System Number associated with the source IP ++ // address. 
++ SrcAsns []int64 `json:"srcAsns,omitempty"` ++ ++ // SrcIpRanges: Source IPv4/IPv6 addresses or CIDR prefixes, in standard ++ // text format. ++ SrcIpRanges []string `json:"srcIpRanges,omitempty"` ++ ++ // SrcPorts: Source port numbers for TCP/UDP/SCTP. Each element can be a ++ // 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). ++ SrcPorts []string `json:"srcPorts,omitempty"` ++ ++ // SrcRegionCodes: Two-letter ISO 3166-1 alpha-2 country code associated ++ // with the source IP address. ++ SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` ++ ++ // UserDefinedFields: User-defined fields. Each element names a defined ++ // field and lists the matching values for that field. ++ UserDefinedFields []*SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch `json:"userDefinedFields,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "DestIpRanges") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "DestIpRanges") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleNetworkMatcher) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleNetworkMatcher ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch struct { ++ // Name: Name of the user-defined field, as given in the definition. ++ Name string `json:"name,omitempty"` ++ ++ // Values: Matching values of the field. Each element can be a 32-bit ++ // unsigned decimal or hexadecimal (starting with "0x") number (e.g. ++ // "64") or range (e.g. "0x400-0x7ff"). ++ Values []string `json:"values,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Name") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Name") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type SecurityPolicyRulePreconfiguredWafConfig struct { + // Exclusions: A list of exclusions to apply during preconfigured WAF + // evaluation. +@@ -47192,12 +49435,14 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -47358,6 +49603,62 @@ func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyUserDefinedField struct { ++ // Base: The base relative to which 'offset' is measured. Possible ++ // values are: - IPV4: Points to the beginning of the IPv4 header. - ++ // IPV6: Points to the beginning of the IPv6 header. - TCP: Points to ++ // the beginning of the TCP header, skipping over any IPv4 options or ++ // IPv6 extension headers. Not present for non-first fragments. - UDP: ++ // Points to the beginning of the UDP header, skipping over any IPv4 ++ // options or IPv6 extension headers. Not present for non-first ++ // fragments. required ++ // ++ // Possible values: ++ // "IPV4" ++ // "IPV6" ++ // "TCP" ++ // "UDP" ++ Base string `json:"base,omitempty"` ++ ++ // Mask: If specified, apply this mask (bitwise AND) to the field to ++ // ignore bits before matching. Encoded as a hexadecimal number ++ // (starting with "0x"). The last byte of the field (in network byte ++ // order) corresponds to the least significant byte of the mask. ++ Mask string `json:"mask,omitempty"` ++ ++ // Name: The name of this field. Must be unique within the policy. ++ Name string `json:"name,omitempty"` ++ ++ // Offset: Offset of the first byte of the field (in network byte order) ++ // relative to 'base'. ++ Offset int64 `json:"offset,omitempty"` ++ ++ // Size: Size of the field in bytes. Valid values: 1-4. ++ Size int64 `json:"size,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Base") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. 
"Base") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyUserDefinedField) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyUserDefinedField ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // SecuritySettings: The authentication and authorization settings for a + // BackendService. + type SecuritySettings struct { +@@ -47375,7 +49676,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) +@@ -47390,8 +49691,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authentication") to +@@ -47540,7 +49840,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -47625,6 +49925,18 @@ type ServiceAttachment struct { + // the PSC service attachment. + PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. 
It is not +@@ -48830,6 +51142,24 @@ type Snapshot struct { + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + ++ // SourceInstantSnapshot: The source instant snapshot used to create ++ // this snapshot. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /instantSnapshots/instantSnapshot - ++ // projects/project/zones/zone/instantSnapshots/instantSnapshot - ++ // zones/zone/instantSnapshots/instantSnapshot ++ SourceInstantSnapshot string `json:"sourceInstantSnapshot,omitempty"` ++ ++ // SourceInstantSnapshotId: [Output Only] The unique ID of the instant ++ // snapshot used to create this snapshot. This value identifies the ++ // exact instant snapshot that was used to create this persistent disk. ++ // For example, if you created the persistent disk from an instant ++ // snapshot that was later deleted and recreated under the same name, ++ // the source instant snapshot ID would identify the exact instant ++ // snapshot that was used. ++ SourceInstantSnapshotId string `json:"sourceInstantSnapshotId,omitempty"` ++ + // SourceSnapshotSchedulePolicy: [Output Only] URL of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicy string `json:"sourceSnapshotSchedulePolicy,omitempty"` +@@ -51063,8 +53393,8 @@ type Subnetwork struct { + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is determined by the org + // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // ExternalIpv6Prefix: The external IPv6 address range that is owned by +@@ -51157,12 +53487,20 @@ type Subnetwork struct { + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. 
+ // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -51184,9 +53522,9 @@ type Subnetwork struct { + ReservedInternalRange string `json:"reservedInternalRange,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -51662,6 +54000,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -52674,6 +55014,15 @@ type TargetHttpProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -53296,7 +55645,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -53419,7 +55770,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -53454,6 +55807,15 @@ type TargetHttpsProxy struct { + // ForwardingRule for more details. 
+ HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -53512,9 +55874,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -54001,6 +56365,10 @@ type TargetInstance struct { + // network that the default network interface belongs to. + Network string `json:"network,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this target instance. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +@@ -54660,6 +57028,10 @@ type TargetPool struct { + // resides. + Region string `json:"region,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this target pool. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +@@ -55500,7 +57872,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -55598,7 +57972,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. 
Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -58415,12 +60791,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -58435,9 +60819,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -59113,6 +61497,16 @@ type VpnGateway struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + ++ // GatewayIpVersion: The IP family of the gateway IPs for the HA-VPN ++ // gateway interfaces. If not specified, IPV4 will be used. ++ // ++ // Possible values: ++ // "IPV4" - Every HA-VPN gateway interface is configured with an IPv4 ++ // address. ++ // "IPV6" - Every HA-VPN gateway interface is configured with an IPv6 ++ // address. ++ GatewayIpVersion string `json:"gatewayIpVersion,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. 
+ Id uint64 `json:"id,omitempty,string"` +@@ -59666,7 +62060,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -59699,8 +62093,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -59768,6 +62162,12 @@ type VpnGatewayVpnGatewayInterface struct { + // address must be a regional external IP address. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: [Output Only] IPv6 address for this VPN interface ++ // associated with the VPN gateway. The IPv6 address must be a regional ++ // external IPv6 address. The format is RFC 5952 format (e.g. ++ // 2001:db8::2d9:51:0:0). ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -63303,6 +65703,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro + } + } + ++// method id "compute.addresses.move": ++ ++type AddressesMoveCall struct { ++ s *Service ++ project string ++ region string ++ address string ++ regionaddressesmoverequest *RegionAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++// - region: Name of the region for this request. ++func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { ++ c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.address = address ++ c.regionaddressesmoverequest = regionaddressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *AddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.addresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource.", ++ // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.addresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "request": { ++ // "$ref": "RegionAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.addresses.setLabels": + + type AddressesSetLabelsCall struct { +@@ -71499,6 +74087,182 @@ func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggrega + } + } + ++// method id "compute.disks.bulkInsert": ++ ++type DisksBulkInsertCall struct { ++ s *Service ++ project string ++ zone string ++ bulkinsertdiskresource *BulkInsertDiskResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// BulkInsert: Bulk create a set of disks. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *DisksService) BulkInsert(project string, zone string, bulkinsertdiskresource *BulkInsertDiskResource) *DisksBulkInsertCall { ++ c := &DisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.bulkinsertdiskresource = bulkinsertdiskresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksBulkInsertCall) RequestId(requestId string) *DisksBulkInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksBulkInsertCall) Fields(s ...googleapi.Field) *DisksBulkInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksBulkInsertCall) Context(ctx context.Context) *DisksBulkInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksBulkInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/bulkInsert") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.bulkInsert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *DisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Bulk create a set of disks.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.bulkInsert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ // "request": { ++ // "$ref": "BulkInsertDiskResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.disks.createSnapshot": + + type DisksCreateSnapshotCall struct { +@@ -73460,38 +76224,54 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.disks.testIamPermissions": ++// method id "compute.disks.startAsyncReplication": + +-type DisksTestIamPermissionsCall struct { +- s *Service +- project string +- zone string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type DisksStartAsyncReplicationCall struct { ++ s *Service ++ project string ++ zone string ++ disk string ++ disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// StartAsyncReplication: Starts asynchronous replication. Must be ++// invoked on the primary disk. + // ++// - disk: The name of the persistent disk. + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. + // - zone: The name of the zone for this request. +-func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { +- c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *DisksService) StartAsyncReplication(project string, zone string, disk string, disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest) *DisksStartAsyncReplicationCall { ++ c := &DisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.disk = disk ++ c.disksstartasyncreplicationrequest = disksstartasyncreplicationrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *DisksStartAsyncReplicationCall) RequestId(requestId string) *DisksStartAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { ++func (c *DisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStartAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -73499,21 +76279,21 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { ++func (c *DisksStartAsyncReplicationCall) Context(ctx context.Context) *DisksStartAsyncReplicationCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *DisksTestIamPermissionsCall) Header() http.Header { ++func (c *DisksStartAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *DisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -73521,14 +76301,14 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstartasyncreplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -73536,21 +76316,21 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "resource": c.resource, ++ "project": c.project, ++ "zone": c.zone, ++ "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.disks.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. 
+-func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.disks.startAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -73569,7 +76349,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -73581,16 +76361,23 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "description": "Starts asynchronous replication. Must be invoked on the primary disk.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + // "httpMethod": "POST", +- // "id": "compute.disks.testIamPermissions", ++ // "id": "compute.disks.startAsyncReplication", + // "parameterOrder": [ + // "project", + // "zone", +- // "resource" ++ // "disk" + // ], + // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -73598,11 +76385,9 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // }, + // "zone": { +@@ -73613,55 +76398,44 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + // "request": { +- // "$ref": "TestPermissionsRequest" ++ // "$ref": "DisksStartAsyncReplicationRequest" + // }, + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.disks.update": ++// method id "compute.disks.stopAsyncReplication": + +-type DisksUpdateCall struct { ++type DisksStopAsyncReplicationCall struct { + s *Service + project string + zone string + disk string +- disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Update: Updates the specified disk with the data included in the +-// request. The update is performed only on selected fields included as +-// part of update-mask. Only the following fields can be modified: +-// user_license. ++// StopAsyncReplication: Stops asynchronous replication. Can be invoked ++// either on the primary or on the secondary disk. + // +-// - disk: The disk name for this request. ++// - disk: The name of the persistent disk. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { +- c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *DisksService) StopAsyncReplication(project string, zone string, disk string) *DisksStopAsyncReplicationCall { ++ c := &DisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk +- c.disk2 = disk2 +- return c +-} +- +-// Paths sets the optional parameter "paths": +-func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { +- c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c + } + +@@ -73676,22 +76450,15 @@ func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) RequestId(requestId string) *DisksStopAsyncReplicationCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// UpdateMask sets the optional parameter "updateMask": update_mask +-// indicates fields to be updated as part of this request. +-func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { +- c.urlParams_.Set("updateMask", updateMask) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -73699,21 +76466,21 @@ func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) Context(ctx context.Context) *DisksStopAsyncReplicationCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *DisksUpdateCall) Header() http.Header { ++func (c *DisksStopAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { ++func (c *DisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -73721,16 +76488,11 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -73743,14 +76505,563 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.disks.update" call. ++// Do executes the "compute.disks.stopAsyncReplication" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *DisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.stopAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.stopGroupAsyncReplication": ++ ++type DisksStopGroupAsyncReplicationCall struct { ++ s *Service ++ project string ++ zone string ++ disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopGroupAsyncReplication: Stops asynchronous replication for a ++// consistency group of disks. Can be invoked either in the primary or ++// secondary scope. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
This must be the zone ++// of the primary or secondary disks in the consistency group. ++func (r *DisksService) StopGroupAsyncReplication(project string, zone string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *DisksStopGroupAsyncReplicationCall { ++ c := &DisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksStopGroupAsyncReplicationCall) RequestId(requestId string) *DisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *DisksStopGroupAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksStopGroupAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.stopGroupAsyncReplication" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.stopGroupAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request. 
This must be the zone of the primary or secondary disks in the consistency group.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ // "request": { ++ // "$ref": "DisksStopGroupAsyncReplicationResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.testIamPermissions": ++ ++type DisksTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { ++ c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.update": ++ ++type DisksUpdateCall struct { ++ s *Service ++ project string ++ zone string ++ disk string ++ disk2 *Disk ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Update: Updates the specified disk with the data included in the ++// request. 
The update is performed only on selected fields included as ++// part of update-mask. Only the following fields can be modified: ++// user_license. ++// ++// - disk: The disk name for this request. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { ++ c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.disk = disk ++ c.disk2 = disk2 ++ return c ++} ++ ++// Paths sets the optional parameter "paths": ++func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { ++ c.urlParams_.SetMulti("paths", append([]string{}, paths...)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// UpdateMask sets the optional parameter "updateMask": update_mask ++// indicates fields to be updated as part of this request. ++func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { ++ c.urlParams_.Set("updateMask", updateMask) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksUpdateCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.update" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -81855,6 +85166,183 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList + } + } + ++// method id "compute.globalAddresses.move": ++ ++type GlobalAddressesMoveCall struct { ++ s *Service ++ project string ++ address string ++ globaladdressesmoverequest *GlobalAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource from one project to ++// another project. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { ++ c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.address = address ++ c.globaladdressesmoverequest = globaladdressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *GlobalAddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.globalAddresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource from one project to another project.", ++ // "flatPath": "projects/{project}/global/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.globalAddresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/addresses/{address}/move", ++ // "request": { ++ // "$ref": "GlobalAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.globalAddresses.setLabels": + + type GlobalAddressesSetLabelsCall struct { +@@ -97888,6 +101376,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use instanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -109088,32 +112577,33 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.instances.setServiceAccount": ++// method id "compute.instances.setSecurityPolicy": + +-type InstancesSetServiceAccountCall struct { ++type InstancesSetSecurityPolicyCall struct { + s *Service + project string + zone string + instance string +- instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetServiceAccount: Sets the service account on the instance. For more +-// information, read Changing the service account and access scopes for +-// an instance. ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified instance. For more information, see Google Cloud Armor ++// Overview + // +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { +- c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instance: Name of the Instance resource to which the security ++// policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - zone: Name of the zone scoping this request. ++func (r *InstancesService) SetSecurityPolicy(project string, zone string, instance string, instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest) *InstancesSetSecurityPolicyCall { ++ c := &InstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancessetserviceaccountrequest = instancessetserviceaccountrequest ++ c.instancessetsecuritypolicyrequest = instancessetsecuritypolicyrequest + return c + } + +@@ -109128,7 +112618,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) RequestId(requestId string) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109136,7 +112626,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109144,21 +112634,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Context(ctx context.Context) *InstancesSetSecurityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetServiceAccountCall) Header() http.Header { ++func (c *InstancesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109166,14 +112656,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetsecuritypolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -109188,14 +112678,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setServiceAccount" call. ++// Do executes the "compute.instances.setSecurityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109226,10 +112716,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + } + return ret, nil + // { +- // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "httpMethod": "POST", +- // "id": "compute.instances.setServiceAccount", ++ // "id": "compute.instances.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -109237,9 +112727,8 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -109256,16 +112745,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "request": { +- // "$ref": "InstancesSetServiceAccountRequest" ++ // "$ref": "InstancesSetSecurityPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -109278,33 +112767,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.instances.setShieldedInstanceIntegrityPolicy": ++// method id "compute.instances.setServiceAccount": + +-type InstancesSetShieldedInstanceIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetServiceAccountCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +-// integrity policy for an instance. You can only use this method on a +-// running instance. This method supports PATCH semantics and uses the +-// JSON merge patch format and processing rules. ++// SetServiceAccount: Sets the service account on the instance. For more ++// information, read Changing the service account and access scopes for ++// an instance. + // +-// - instance: Name or id of the instance scoping this request. ++// - instance: Name of the instance resource to start. 
+ // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { +- c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { ++ c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy ++ c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c + } + +@@ -109319,7 +112807,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109327,7 +112815,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109335,21 +112823,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetServiceAccountCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109357,16 +112845,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -109379,14 +112867,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. ++// Do executes the "compute.instances.setServiceAccount" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109417,10 +112905,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.setServiceAccount", + // "parameterOrder": [ + // "project", + // "zone", +@@ -109428,7 +112916,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // ], + // "parameters": { + // "instance": { +- // "description": "Name or id of the instance scoping this request.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -109454,9 +112942,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { +- // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // "$ref": "InstancesSetServiceAccountRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -109469,33 +112957,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + + } + +-// method id "compute.instances.setShieldedVmIntegrityPolicy": ++// method id "compute.instances.setShieldedInstanceIntegrityPolicy": + +-type InstancesSetShieldedVmIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedInstanceIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +-// for a VM instance. You can only use this method on a running VM +-// instance. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance ++// integrity policy for an instance. You can only use this method on a ++// running instance. This method supports PATCH semantics and uses the ++// JSON merge patch format and processing rules. + // +-// - instance: Name of the instance scoping this request. ++// - instance: Name or id of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { +- c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++ c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c + } + +@@ -109510,7 +112998,7 @@ func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109518,7 +113006,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109526,21 +113014,21 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109548,14 +113036,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -109570,14 +113058,205 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "httpMethod": "PATCH", ++ // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "instance": { ++ // "description": "Name or id of the instance scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "request": { ++ // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.setShieldedVmIntegrityPolicy": ++ ++type InstancesSetShieldedVmIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy ++// for a VM instance. You can only use this method on a running VM ++// instance. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. ++// ++// - instance: Name of the instance scoping this request. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109875,6 +113554,22 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + return c + } + ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +@@ -109987,6 +113682,11 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "required": true, + // "type": "string" + // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -112174,9 +115874,9 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.interconnectAttachments.aggregatedList": ++// method id "compute.instantSnapshots.aggregatedList": + +-type InterconnectAttachmentsAggregatedListCall struct { ++type InstantSnapshotsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -112185,12 +115885,11 @@ type InterconnectAttachmentsAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves an aggregated list of interconnect +-// attachments. ++// AggregatedList: Retrieves an aggregated list of instantSnapshots. + // + // - project: Project ID for this request. +-func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { +- c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstantSnapshotsService) AggregatedList(project string) *InstantSnapshotsAggregatedListCall { ++ c := &InstantSnapshotsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -112230,7 +115929,7 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Filter(filter string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -112243,7 +115942,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *Inter + // response. For resource types which predate this field, if this flag + // is omitted or false, only scopes of the scope types where the + // resource type is expected to be found will be included. +-func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c + } +@@ -112254,7 +115953,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllS + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) MaxResults(maxResults int64) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -112268,7 +115967,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. 
Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) OrderBy(orderBy string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -112276,7 +115975,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *Int + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) PageToken(pageToken string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -112285,7 +115984,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -112293,7 +115992,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnP + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Fields(s ...googleapi.Field) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112303,7 +116002,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) IfNoneMatch(entityTag string) *InstantSnapshotsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -112311,21 +116010,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Context(ctx context.Context) *InstantSnapshotsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { ++func (c *InstantSnapshotsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112338,7 +116037,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -112351,15 +116050,14 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.aggregatedList" call. +-// Exactly one of *InterconnectAttachmentAggregatedList or error will be ++// Do executes the "compute.instantSnapshots.aggregatedList" call. ++// Exactly one of *InstantSnapshotAggregatedList or error will be + // non-nil. Any non-2xx status code is an error. Response headers are in +-// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { ++// either *InstantSnapshotAggregatedList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InstantSnapshotsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112378,7 +116076,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachmentAggregatedList{ ++ ret := &InstantSnapshotAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -112390,10 +116088,10 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Retrieves an aggregated list of interconnect attachments.", +- // "flatPath": "projects/{project}/aggregated/interconnectAttachments", ++ // "description": "Retrieves an aggregated list of instantSnapshots.", ++ // "flatPath": "projects/{project}/aggregated/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.aggregatedList", ++ // "id": "compute.instantSnapshots.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -112439,9 +116137,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/interconnectAttachments", ++ // "path": "projects/{project}/aggregated/instantSnapshots", + // "response": { +- // "$ref": "InterconnectAttachmentAggregatedList" ++ // "$ref": "InstantSnapshotAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -112455,7 +116153,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { ++func (c *InstantSnapshotsAggregatedListCall) Pages(ctx context.Context, f func(*InstantSnapshotAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -112473,29 +116171,33 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f + } + } + +-// method id "compute.interconnectAttachments.delete": ++// method id "compute.instantSnapshots.delete": + +-type InterconnectAttachmentsDeleteCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified interconnect attachment. ++// Delete: Deletes the specified InstantSnapshot resource. Keep in mind ++// that deleting a single instantSnapshot might not necessarily delete ++// all the data on that instantSnapshot. If any data on the ++// instantSnapshot that is marked for deletion is needed for subsequent ++// instantSnapshots, the data will be moved to the next corresponding ++// instantSnapshot. For more information, see Deleting instantSnapshots. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// delete. +-// - project: Project ID for this request. 
+-// - region: Name of the region for this request. +-func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { +- c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instantSnapshot: Name of the InstantSnapshot resource to delete. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Delete(project string, zone string, instantSnapshot string) *InstantSnapshotsDeleteCall { ++ c := &InstantSnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot + return c + } + +@@ -112510,7 +116212,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) RequestId(requestId string) *InstantSnapshotsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -112518,7 +116220,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) Fields(s ...googleapi.Field) *InstantSnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112526,21 +116228,21 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) Context(ctx context.Context) *InstantSnapshotsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { ++func (c *InstantSnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112550,7 +116252,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -112558,21 +116260,21 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.delete" call. ++// Do executes the "compute.instantSnapshots.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112603,18 +116305,18 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Deletes the specified interconnect attachment.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. 
For more information, see Deleting instantSnapshots.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "DELETE", +- // "id": "compute.interconnectAttachments.delete", ++ // "id": "compute.instantSnapshots.delete", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "instantSnapshot" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to delete.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -112627,20 +116329,209 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instantSnapshots.export": ++ ++type InstantSnapshotsExportCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ instantsnapshotsexportrequest *InstantSnapshotsExportRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Export: Export the changed blocks between two instant snapshots to a ++// customer's bucket in the user specified format. ++// ++// - instantSnapshot: Name of the instant snapshot to export. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Export(project string, zone string, instantSnapshot string, instantsnapshotsexportrequest *InstantSnapshotsExportRequest) *InstantSnapshotsExportCall { ++ c := &InstantSnapshotsExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot ++ c.instantsnapshotsexportrequest = instantsnapshotsexportrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. 
Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstantSnapshotsExportCall) RequestId(requestId string) *InstantSnapshotsExportCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstantSnapshotsExportCall) Fields(s ...googleapi.Field) *InstantSnapshotsExportCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstantSnapshotsExportCall) Context(ctx context.Context) *InstantSnapshotsExportCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstantSnapshotsExportCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstantSnapshotsExportCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshotsexportrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instantSnapshots.export" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstantSnapshotsExportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ // "httpMethod": "POST", ++ // "id": "compute.instantSnapshots.export", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instantSnapshot" ++ // ], ++ // "parameters": { ++ // "instantSnapshot": { ++ // "description": "Name of the instant snapshot to export.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ // "request": { ++ // "$ref": "InstantSnapshotsExportRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -112652,37 +116543,37 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.interconnectAttachments.get": ++// method id "compute.instantSnapshots.get": + +-type InterconnectAttachmentsGetCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsGetCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified interconnect attachment. ++// Get: Returns the specified InstantSnapshot resource in the specified ++// zone. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// return. +-// - project: Project ID for this request. +-// - region: Name of the region for this request. +-func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { +- c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instantSnapshot: Name of the InstantSnapshot resource to return. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Get(project string, zone string, instantSnapshot string) *InstantSnapshotsGetCall { ++ c := &InstantSnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) Fields(s ...googleapi.Field) *InstantSnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112692,7 +116583,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) IfNoneMatch(entityTag string) *InstantSnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -112700,21 +116591,21 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) Context(ctx context.Context) *InstantSnapshotsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsGetCall) Header() http.Header { ++func (c *InstantSnapshotsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112727,7 +116618,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -112735,21 +116626,21 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.get" call. +-// Exactly one of *InterconnectAttachment or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectAttachment.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.instantSnapshots.get" call. ++// Exactly one of *InstantSnapshot or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *InstantSnapshot.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { ++func (c *InstantSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*InstantSnapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112768,7 +116659,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachment{ ++ ret := &InstantSnapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -112780,18 +116671,18 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + } + return ret, nil + // { +- // "description": "Returns the specified interconnect attachment.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "description": "Returns the specified InstantSnapshot resource in the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.get", ++ // "id": "compute.instantSnapshots.get", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "instantSnapshot" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to return.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -112804,17 +116695,17 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "InstantSnapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -112825,28 +116716,213 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + + } + +-// method id "compute.interconnectAttachments.insert": ++// method id "compute.instantSnapshots.getIamPolicy": + +-type InterconnectAttachmentsInsertCall struct { +- s *Service +- project string +- region string +- interconnectattachment *InterconnectAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsGetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an InterconnectAttachment in the specified project +-// using the data included in the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. +-// - region: Name of the region for this request. 
+-func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { +- c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) GetIamPolicy(project string, zone string, resource string) *InstantSnapshotsGetIamPolicyCall { ++ c := &InstantSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectattachment = interconnectattachment ++ c.zone = zone ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *InstantSnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstantSnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *InstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InstantSnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *InstantSnapshotsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstantSnapshotsGetIamPolicyCall) Context(ctx context.Context) *InstantSnapshotsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstantSnapshotsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstantSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instantSnapshots.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *InstantSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.instantSnapshots.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instantSnapshots.insert": ++ ++type InstantSnapshotsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ instantsnapshot *InstantSnapshot ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ 
header_ http.Header ++} ++ ++// Insert: Creates an instant snapshot in the specified zone. ++// ++// - project: Project ID for this request. ++// - zone: Name of the zone for this request. ++func (r *InstantSnapshotsService) Insert(project string, zone string, instantsnapshot *InstantSnapshot) *InstantSnapshotsInsertCall { ++ c := &InstantSnapshotsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instantsnapshot = instantsnapshot + return c + } + +@@ -112861,22 +116937,15 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) RequestId(requestId string) *InstantSnapshotsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// ValidateOnly sets the optional parameter "validateOnly": If true, the +-// request will not be committed. +-func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { +- c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) Fields(s ...googleapi.Field) *InstantSnapshotsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112884,21 +116953,21 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) Context(ctx context.Context) *InstantSnapshotsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsInsertCall) Header() http.Header { ++func (c *InstantSnapshotsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112906,14 +116975,14 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -112922,19 +116991,19 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.insert" call. ++// Do executes the "compute.instantSnapshots.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112965,13 +117034,13 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "description": "Creates an instant snapshot in the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.insert", ++ // "id": "compute.instantSnapshots.insert", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "project": { +@@ -112981,27 +117050,22 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, +- // "validateOnly": { +- // "description": "If true, the request will not be committed.", +- // "location": "query", +- // "type": "boolean" ++ // "zone": { ++ // "description": "Name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots", + // "request": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "InstantSnapshot" + // }, + // "response": { + // "$ref": "Operation" +@@ -113014,27 +117078,27 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.interconnectAttachments.list": ++// method id "compute.instantSnapshots.list": + +-type InterconnectAttachmentsListCall struct { ++type InstantSnapshotsListCall struct { + s *Service + project string +- region string ++ zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves the list of interconnect attachments contained within +-// the specified region. ++// List: Retrieves the list of InstantSnapshot resources contained ++// within the specified zone. + // + // - project: Project ID for this request. +-// - region: Name of the region for this request. 
+-func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { +- c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) List(project string, zone string) *InstantSnapshotsListCall { ++ c := &InstantSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + return c + } + +@@ -113073,7 +117137,7 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Filter(filter string) *InstantSnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -113084,7 +117148,7 @@ func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAtt + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) MaxResults(maxResults int64) *InstantSnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -113098,7 +117162,7 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) OrderBy(orderBy string) *InstantSnapshotsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -113106,7 +117170,7 @@ func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectA + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) PageToken(pageToken string) *InstantSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -113115,7 +117179,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstantSnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -113123,7 +117187,7 @@ func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSucc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Fields(s ...googleapi.Field) *InstantSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113133,7 +117197,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) IfNoneMatch(entityTag string) *InstantSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -113141,21 +117205,21 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Context(ctx context.Context) *InstantSnapshotsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsListCall) Header() http.Header { ++func (c *InstantSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113168,7 +117232,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -113177,19 +117241,19 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.list" call. +-// Exactly one of *InterconnectAttachmentList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *InterconnectAttachmentList.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.instantSnapshots.list" call. ++// Exactly one of *InstantSnapshotList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstantSnapshotList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { ++func (c *InstantSnapshotsListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113208,7 +117272,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachmentList{ ++ ret := &InstantSnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -113220,13 +117284,13 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + } + return ret, nil + // { +- // "description": "Retrieves the list of interconnect attachments contained within the specified region.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "description": "Retrieves the list of InstantSnapshot resources contained within the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.list", ++ // "id": "compute.instantSnapshots.list", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "filter": { +@@ -113259,22 +117323,22 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots", + // "response": { +- // "$ref": "InterconnectAttachmentList" ++ // "$ref": "InstantSnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -113288,7 +117352,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { ++func (c *InstantSnapshotsListCall) Pages(ctx context.Context, f func(*InstantSnapshotList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -113306,56 +117370,38 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int + } + } + +-// method id "compute.interconnectAttachments.patch": ++// method id "compute.instantSnapshots.setIamPolicy": + +-type InterconnectAttachmentsPatchCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- interconnectattachment *InterconnectAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsSetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetpolicyrequest *ZoneSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates the specified interconnect attachment with the data +-// included in the request. This method supports PATCH semantics and +-// uses the JSON merge patch format and processing rules. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// patch. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { +- c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstantSnapshotsSetIamPolicyCall { ++ c := &InstantSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment +- c.interconnectattachment = interconnectattachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { +- c.urlParams_.Set("requestId", requestId) ++ c.zone = zone ++ c.resource = resource ++ c.zonesetpolicyrequest = zonesetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { ++func (c *InstantSnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *InstantSnapshotsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113363,21 +117409,21 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { ++func (c *InstantSnapshotsSetIamPolicyCall) Context(ctx context.Context) *InstantSnapshotsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsPatchCall) Header() http.Header { ++func (c *InstantSnapshotsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113385,36 +117431,36 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.patch" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.instantSnapshots.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *InstantSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113433,7 +117479,7 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -113445,23 +117491,16 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + } + return ret, nil + // { +- // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", +- // "httpMethod": "PATCH", +- // "id": "compute.interconnectAttachments.patch", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.instantSnapshots.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "resource" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to patch.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -113469,25 +117508,27 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region scoping this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", + // "request": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -113497,31 +117538,31 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + + } + +-// method id "compute.interconnectAttachments.setLabels": ++// method id "compute.instantSnapshots.setLabels": + +-type InterconnectAttachmentsSetLabelsCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetlabelsrequest *RegionSetLabelsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsSetLabelsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetlabelsrequest *ZoneSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetLabels: Sets the labels on an InterconnectAttachment. To learn +-// more about labels, read the Labeling Resources documentation. ++// SetLabels: Sets the labels on a instantSnapshot in the given zone. To ++// learn more about labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. +-// - region: The region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { +- c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *InstantSnapshotsSetLabelsCall { ++ c := &InstantSnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + c.resource = resource +- c.regionsetlabelsrequest = regionsetlabelsrequest ++ c.zonesetlabelsrequest = zonesetlabelsrequest + return c + } + +@@ -113536,7 +117577,7 @@ func (r *InterconnectAttachmentsService) SetLabels(project string, region string + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) RequestId(requestId string) *InstantSnapshotsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -113544,7 +117585,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *Inte + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *InstantSnapshotsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113552,21 +117593,21 @@ func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *Int + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) Context(ctx context.Context) *InstantSnapshotsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { ++func (c *InstantSnapshotsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113574,14 +117615,14 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -113590,20 +117631,20 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.setLabels" call. ++// Do executes the "compute.instantSnapshots.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113634,13 +117675,13 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ // "description": "Sets the labels on a instantSnapshot in the given zone. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.setLabels", ++ // "id": "compute.instantSnapshots.setLabels", + // "parameterOrder": [ + // "project", +- // "region", ++ // "zone", + // "resource" + // ], + // "parameters": { +@@ -113651,13 +117692,6 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "The region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +@@ -113669,11 +117703,18 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + // "request": { +- // "$ref": "RegionSetLabelsRequest" ++ // "$ref": "ZoneSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -113686,12 +117727,12 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.interconnectAttachments.testIamPermissions": ++// method id "compute.instantSnapshots.testIamPermissions": + +-type InterconnectAttachmentsTestIamPermissionsCall struct { ++type InstantSnapshotsTestIamPermissionsCall struct { + s *Service + project string +- region string ++ zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams +@@ -113703,12 +117744,12 @@ type InterconnectAttachmentsTestIamPermissionsCall struct { + // specified resource. + // + // - project: Project ID for this request. +-// - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { +- c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstantSnapshotsTestIamPermissionsCall { ++ c := &InstantSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +@@ -113717,7 +117758,7 @@ func (r *InterconnectAttachmentsService) TestIamPermissions(project string, regi + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { ++func (c *InstantSnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstantSnapshotsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113725,21 +117766,21 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { ++func (c *InstantSnapshotsTestIamPermissionsCall) Context(ctx context.Context) *InstantSnapshotsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { ++func (c *InstantSnapshotsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113754,7 +117795,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -113763,20 +117804,20 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.testIamPermissions" call. ++// Do executes the "compute.instantSnapshots.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InstantSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113808,12 +117849,12 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.testIamPermissions", ++ // "id": "compute.instantSnapshots.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "region", ++ // "zone", + // "resource" + // ], + // "parameters": { +@@ -113824,22 +117865,22 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "The name of the region for this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -113855,171 +117896,9 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.interconnectLocations.get": +- +-type InterconnectLocationsGetCall struct { +- s *Service +- project string +- interconnectLocation string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header +-} +- +-// Get: Returns the details for the specified interconnect location. +-// Gets a list of available interconnect locations by making a list() +-// request. +-// +-// - interconnectLocation: Name of the interconnect location to return. +-// - project: Project ID for this request. +-func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { +- c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.interconnectLocation = interconnectLocation +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. 
Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *InterconnectLocationsGetCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnectLocation": c.interconnectLocation, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.interconnectLocations.get" call. +-// Exactly one of *InterconnectLocation or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectLocation.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &InterconnectLocation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", +- // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- // "httpMethod": "GET", +- // "id": "compute.interconnectLocations.get", +- // "parameterOrder": [ +- // "project", +- // "interconnectLocation" +- // ], +- // "parameters": { +- // "interconnectLocation": { +- // "description": "Name of the interconnect location to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- // "response": { +- // "$ref": "InterconnectLocation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.interconnectLocations.list": ++// method id "compute.interconnectAttachments.aggregatedList": + +-type InterconnectLocationsListCall struct { ++type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -114028,12 +117907,12 @@ type InterconnectLocationsListCall struct { + header_ http.Header + } + +-// List: Retrieves the list of interconnect locations available to the +-// specified project. ++// AggregatedList: Retrieves an aggregated list of interconnect ++// attachments. + // + // - project: Project ID for this request. +-func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { +- c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { ++ c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -114073,18 +117952,31 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } + ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. 
++func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -114098,7 +117990,7 @@ func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *Interconne + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -114106,7 +117998,7 @@ func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLoc + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -114115,7 +118007,7 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -114123,7 +118015,7 @@ func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSucces + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114133,7 +118025,7 @@ func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *Interconne + // getting updates only after the object has changed since the last + // request. 
Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -114141,21 +118033,21 @@ func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *Interconn + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectLocationsListCall) Header() http.Header { ++func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114168,7 +118060,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -114181,14 +118073,15 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectLocations.list" call. +-// Exactly one of *InterconnectLocationList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *InterconnectLocationList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { ++// Do executes the "compute.interconnectAttachments.aggregatedList" call. ++// Exactly one of *InterconnectAttachmentAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. 
++func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114207,7 +118100,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectLocationList{ ++ ret := &InterconnectAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -114219,10 +118112,10 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + } + return ret, nil + // { +- // "description": "Retrieves the list of interconnect locations available to the specified project.", +- // "flatPath": "projects/{project}/global/interconnectLocations", ++ // "description": "Retrieves an aggregated list of interconnect attachments.", ++ // "flatPath": "projects/{project}/aggregated/interconnectAttachments", + // "httpMethod": "GET", +- // "id": "compute.interconnectLocations.list", ++ // "id": "compute.interconnectAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -114232,6 +118125,11 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // "location": "query", + // "type": "string" + // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -114263,9 +118161,9 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnectLocations", ++ // "path": "projects/{project}/aggregated/interconnectAttachments", + // "response": { +- // "$ref": "InterconnectLocationList" ++ // "$ref": "InterconnectAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -114279,7 +118177,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { ++func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -114297,25 +118195,29 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter + } + } + +-// method id "compute.interconnects.delete": ++// method id "compute.interconnectAttachments.delete": + +-type InterconnectsDeleteCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified Interconnect. ++// Delete: Deletes the specified interconnect attachment. + // +-// - interconnect: Name of the interconnect to delete. +-// - project: Project ID for this request. +-func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { +- c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// delete. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { ++ c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment + return c + } + +@@ -114330,7 +118232,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -114338,7 +118240,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114346,21 +118248,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsDeleteCall) Header() http.Header { ++func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114370,7 +118272,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -114378,20 +118280,21 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.delete" call. ++// Do executes the "compute.interconnectAttachments.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114422,17 +118325,18 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Deletes the specified Interconnect.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Deletes the specified interconnect attachment.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "DELETE", +- // "id": "compute.interconnects.delete", ++ // "id": "compute.interconnectAttachments.delete", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to delete.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -114445,13 +118349,20 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "Operation" + // }, +@@ -114463,34 +118374,37 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.interconnects.get": ++// method id "compute.interconnectAttachments.get": + +-type InterconnectsGetCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified Interconnect. Get a list of available +-// Interconnects by making a list() request. ++// Get: Returns the specified interconnect attachment. + // +-// - interconnect: Name of the interconnect to return. +-// - project: Project ID for this request. 
+-func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { +- c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// return. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { ++ c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114500,7 +118414,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -114508,21 +118422,21 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsGetCall) Header() http.Header { ++func (c *InterconnectAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114535,7 +118449,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -114543,20 +118457,21 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.get" call. +-// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Interconnect.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { ++// Do executes the "compute.interconnectAttachments.get" call. ++// Exactly one of *InterconnectAttachment or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InterconnectAttachment.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114575,7 +118490,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Interconnect{ ++ ret := &InterconnectAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -114587,17 +118502,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + } + return ret, nil + // { +- // "description": "Returns the specified Interconnect. 
Get a list of available Interconnects by making a list() request.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Returns the specified interconnect attachment.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "GET", +- // "id": "compute.interconnects.get", ++ // "id": "compute.interconnectAttachments.get", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to return.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -114609,173 +118525,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", +- // "response": { +- // "$ref": "Interconnect" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.interconnects.getDiagnostics": +- +-type InterconnectsGetDiagnosticsCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header +-} +- +-// GetDiagnostics: Returns the interconnectDiagnostics for the specified +-// Interconnect. +-// +-// - interconnect: Name of the interconnect resource to query. +-// - project: Project ID for this request. +-func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { +- c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.interconnect = interconnect +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. 
+-func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.interconnects.getDiagnostics" call. +-// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &InterconnectsGetDiagnosticsResponse{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", +- // "httpMethod": "GET", +- // "id": "compute.interconnects.getDiagnostics", +- // "parameterOrder": [ +- // "project", +- // "interconnect" +- // ], +- // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect resource to query.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" + // }, +- // "project": { +- // "description": "Project ID for this request.", ++ // "region": { ++ // "description": "Name of the region for this request.", + // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { +- // "$ref": "InterconnectsGetDiagnosticsResponse" ++ // "$ref": "InterconnectAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -114786,25 +118547,28 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int + + } + +-// method id "compute.interconnects.insert": ++// method id "compute.interconnectAttachments.insert": + +-type InterconnectsInsertCall struct { +- s *Service +- project string +- interconnect *Interconnect +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ interconnectattachment *InterconnectAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an Interconnect in the specified project using the +-// data included in the request. ++// Insert: Creates an InterconnectAttachment in the specified project ++// using the data included in the request. + // + // - project: Project ID for this request. +-func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { +- c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. 
++func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { ++ c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectattachment = interconnectattachment + return c + } + +@@ -114819,15 +118583,22 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114835,21 +118606,21 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectsInsertCall) Header() http.Header { ++func (c *InterconnectAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114857,14 +118628,14 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -114873,18 +118644,19 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.insert" call. ++// Do executes the "compute.interconnectAttachments.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114915,12 +118687,13 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Creates an Interconnect in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/global/interconnects", ++ // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "httpMethod": "POST", +- // "id": "compute.interconnects.insert", ++ // "id": "compute.interconnectAttachments.insert", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "project": { +@@ -114930,15 +118703,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnects", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "request": { +- // "$ref": "Interconnect" ++ // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -114951,24 +118736,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.interconnects.list": ++// method id "compute.interconnectAttachments.list": + +-type InterconnectsListCall struct { ++type InterconnectAttachmentsListCall struct { + s *Service + project string ++ region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves the list of Interconnects available to the specified +-// project. ++// List: Retrieves the list of interconnect attachments contained within ++// the specified region. + // + // - project: Project ID for this request. +-func (r *InterconnectsService) List(project string) *InterconnectsListCall { +- c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. 
++func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { ++ c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + return c + } + +@@ -115007,7 +118795,7 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -115018,7 +118806,7 @@ func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -115032,7 +118820,7 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -115040,7 +118828,7 @@ func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -115049,7 +118837,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -115057,7 +118845,7 @@ func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115067,7 +118855,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -115075,21 +118863,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsListCall) Header() http.Header { ++func (c *InterconnectAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115102,7 +118890,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -115111,18 +118899,19 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.list" call. +-// Exactly one of *InterconnectList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.interconnectAttachments.list" call. ++// Exactly one of *InterconnectAttachmentList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectAttachmentList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { ++func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115141,7 +118930,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectList{ ++ ret := &InterconnectAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -115153,12 +118942,13 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + } + return ret, nil + // { +- // "description": "Retrieves the list of Interconnects available to the specified project.", +- // "flatPath": "projects/{project}/global/interconnects", ++ // "description": "Retrieves the list of interconnect attachments contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "httpMethod": "GET", +- // "id": "compute.interconnects.list", ++ // "id": "compute.interconnectAttachments.list", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -115191,15 +118981,22 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnects", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "response": { +- // "$ref": "InterconnectList" ++ // "$ref": "InterconnectAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -115213,7 +119010,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { ++func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -115231,29 +119028,33 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL + } + } + +-// method id "compute.interconnects.patch": ++// method id "compute.interconnectAttachments.patch": + +-type InterconnectsPatchCall struct { +- s *Service +- project string +- interconnect string +- interconnect2 *Interconnect +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsPatchCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ interconnectattachment *InterconnectAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates the specified Interconnect with the data included in +-// the request. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// Patch: Updates the specified interconnect attachment with the data ++// included in the request. This method supports PATCH semantics and ++// uses the JSON merge patch format and processing rules. + // +-// - interconnect: Name of the interconnect to update. +-// - project: Project ID for this request. +-func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { +- c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// patch. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { ++ c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect +- c.interconnect2 = interconnect2 ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment ++ c.interconnectattachment = interconnectattachment + return c + } + +@@ -115268,7 +119069,7 @@ func (r *InterconnectsService) Patch(project string, interconnect string, interc + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -115276,7 +119077,7 @@ func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatch + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115284,21 +119085,21 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsPatchCall) Header() http.Header { ++func (c *InterconnectAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115306,14 +119107,14 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -115321,20 +119122,21 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.patch" call. ++// Do executes the "compute.interconnectAttachments.patch" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115365,17 +119167,18 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + } + return ret, nil + // { +- // "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "PATCH", +- // "id": "compute.interconnects.patch", ++ // "id": "compute.interconnectAttachments.patch", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to update.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -115388,15 +119191,22 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "request": { +- // "$ref": "Interconnect" ++ // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -115409,35 +119219,54 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + + } + +-// method id "compute.interconnects.setLabels": ++// method id "compute.interconnectAttachments.setLabels": + +-type InterconnectsSetLabelsCall struct { ++type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string ++ region string + resource string +- globalsetlabelsrequest *GlobalSetLabelsRequest ++ regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetLabels: Sets the labels on an Interconnect. To learn more about +-// labels, read the Labeling Resources documentation. 
++// SetLabels: Sets the labels on an InterconnectAttachment. To learn ++// more about labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. ++// - region: The region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { +- c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { ++ c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + c.resource = resource +- c.globalsetlabelsrequest = globalsetlabelsrequest ++ c.regionsetlabelsrequest = regionsetlabelsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { ++func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115445,21 +119274,21 @@ func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *Interconnects + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { ++func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectsSetLabelsCall) Header() http.Header { ++func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115467,14 +119296,14 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -115483,19 +119312,20 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.setLabels" call. ++// Do executes the "compute.interconnectAttachments.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115526,12 +119356,13 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + } + return ret, nil + // { +- // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", +- // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", ++ // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.interconnects.setLabels", ++ // "id": "compute.interconnectAttachments.setLabels", + // "parameterOrder": [ + // "project", ++ // "region", + // "resource" + // ], + // "parameters": { +@@ -115542,17 +119373,29 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "The region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{resource}/setLabels", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { +- // "$ref": "GlobalSetLabelsRequest" ++ // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -115565,11 +119408,12 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.interconnects.testIamPermissions": ++// method id "compute.interconnectAttachments.testIamPermissions": + +-type InterconnectsTestIamPermissionsCall struct { ++type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string ++ region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams +@@ -115581,10 +119425,12 @@ type InterconnectsTestIamPermissionsCall struct { + // specified resource. + // + // - project: Project ID for this request. ++// - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. 
+-func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { +- c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { ++ c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +@@ -115593,7 +119439,7 @@ func (r *InterconnectsService) TestIamPermissions(project string, resource strin + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115601,21 +119447,21 @@ func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Inte + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115630,7 +119476,7 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -115639,19 +119485,20 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.testIamPermissions" call. ++// Do executes the "compute.interconnectAttachments.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115683,11 +119530,12 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.interconnects.testIamPermissions", ++ // "id": "compute.interconnectAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", ++ // "region", + // "resource" + // ], + // "parameters": { +@@ -115698,15 +119546,22 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -115722,37 +119577,35 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.licenseCodes.get": ++// method id "compute.interconnectLocations.get": + +-type LicenseCodesGetCall struct { +- s *Service +- project string +- licenseCode string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InterconnectLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ 
string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Return a specified license code. License codes are mirrored +-// across all projects that have permissions to read the License Code. +-// *Caution* This resource is intended for use only by third-party +-// partners who are creating Cloud Marketplace images. ++// Get: Returns the details for the specified interconnect location. ++// Gets a list of available interconnect locations by making a list() ++// request. + // +-// - licenseCode: Number corresponding to the License code resource to +-// return. +-// - project: Project ID for this request. +-func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { +- c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectLocation: Name of the interconnect location to return. ++// - project: Project ID for this request. ++func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { ++ c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.licenseCode = licenseCode ++ c.interconnectLocation = interconnectLocation + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115762,7 +119615,7 @@ func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -115770,21 +119623,21 @@ func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicenseCodesGetCall) Header() http.Header { ++func (c *InterconnectLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115797,7 +119650,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -115805,20 +119658,20 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "licenseCode": c.licenseCode, ++ "project": c.project, ++ "interconnectLocation": c.interconnectLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenseCodes.get" call. +-// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *LicenseCode.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { ++// Do executes the "compute.interconnectLocations.get" call. ++// Exactly one of *InterconnectLocation or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InterconnectLocation.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115837,7 +119690,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &LicenseCode{ ++ ret := &InterconnectLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -115849,19 +119702,19 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + } + return ret, nil + // { +- // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + // "httpMethod": "GET", +- // "id": "compute.licenseCodes.get", ++ // "id": "compute.interconnectLocations.get", + // "parameterOrder": [ + // "project", +- // "licenseCode" ++ // "interconnectLocation" + // ], + // "parameters": { +- // "licenseCode": { +- // "description": "Number corresponding to the License code resource to return.", ++ // "interconnectLocation": { ++ // "description": "Name of the interconnect location to return.", + // "location": "path", +- // "pattern": "[0-9]{0,61}?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -115873,9 +119726,9 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { +- // "$ref": "LicenseCode" ++ // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -115886,94 +119739,382 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + + } + +-// method id "compute.licenseCodes.testIamPermissions": ++// method id "compute.interconnectLocations.list": + +-type LicenseCodesTestIamPermissionsCall struct { +- s *Service +- project string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. *Caution* This resource is intended for use only +-// by third-party partners who are creating Cloud Marketplace images. ++// List: Retrieves the list of interconnect locations available to the ++// specified project. + // + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { +- c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { ++ c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. 
The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. 
++func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { ++func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { ++func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } +- reqHeaders.Set("Content-Type", "application/json") ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectLocations.list" call. ++// Exactly one of *InterconnectLocationList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectLocationList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectLocations", ++ // "response": { ++ // "$ref": "InterconnectLocationList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.interconnectRemoteLocations.get": ++ ++type InterconnectRemoteLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectRemoteLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the details for the specified interconnect remote ++// location. Gets a list of available interconnect remote locations by ++// making a list() request. ++// ++// - interconnectRemoteLocation: Name of the interconnect remote ++// location to return. ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { ++ c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnectRemoteLocation = interconnectRemoteLocation ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "resource": c.resource, ++ "project": c.project, ++ "interconnectRemoteLocation": c.interconnectRemoteLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenseCodes.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.interconnectRemoteLocations.get" call. ++// Exactly one of *InterconnectRemoteLocation or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectRemoteLocation.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115992,7 +120133,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &InterconnectRemoteLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116004,36 +120145,292 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.licenseCodes.testIamPermissions", ++ // "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.get", + // "parameterOrder": [ + // "project", +- // "resource" ++ // "interconnectRemoteLocation" + // ], + // "parameters": { ++ // "interconnectRemoteLocation": { ++ // "description": "Name of the interconnect remote location to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnectRemoteLocations.list": ++ ++type InterconnectRemoteLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of interconnect remote locations available ++// to the specified project. ++// ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { ++ c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. 
You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. 
++func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectRemoteLocationsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.list" call. ++// Exactly one of *InterconnectRemoteLocationList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations", + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "InterconnectRemoteLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116044,27 +120441,46 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.licenses.delete": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type LicensesDeleteCall struct { +- s *Service +- project string +- license string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.interconnects.delete": ++ ++type InterconnectsDeleteCall struct { ++ s *Service ++ project string ++ interconnect string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified license. *Caution* This resource is +-// intended for use only by third-party partners who are creating Cloud +-// Marketplace images. ++// Delete: Deletes the specified Interconnect. + // +-// - license: Name of the license resource to delete. ++// - interconnect: Name of the interconnect to delete. + // - project: Project ID for this request. +-func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { +- c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { ++ c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + +@@ -116079,7 +120495,7 @@ func (r *LicensesService) Delete(project string, license string) *LicensesDelete + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -116087,7 +120503,7 @@ func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116095,21 +120511,21 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesDeleteCall) Header() http.Header { ++func (c *InterconnectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116119,7 +120535,7 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -116127,20 +120543,20 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "license": c.license, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.delete" call. ++// Do executes the "compute.interconnects.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116171,17 +120587,17 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses/{license}", ++ // "description": "Deletes the specified Interconnect.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "DELETE", +- // "id": "compute.licenses.delete", ++ // "id": "compute.interconnects.delete", + // "parameterOrder": [ + // "project", +- // "license" ++ // "interconnect" + // ], + // "parameters": { +- // "license": { +- // "description": "Name of the license resource to delete.", ++ // "interconnect": { ++ // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -116200,7 +120616,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{license}", ++ // "path": "projects/{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Operation" + // }, +@@ -116212,35 +120628,34 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.licenses.get": ++// method id "compute.interconnects.get": + +-type LicensesGetCall struct { ++type InterconnectsGetCall struct { + s *Service + project string +- license string ++ interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified License resource. *Caution* This resource +-// is intended for use only by third-party partners who are creating +-// Cloud Marketplace images. ++// Get: Returns the specified Interconnect. Get a list of available ++// Interconnects by making a list() request. + // +-// - license: Name of the License resource to return. ++// - interconnect: Name of the interconnect to return. + // - project: Project ID for this request. +-func (r *LicensesService) Get(project string, license string) *LicensesGetCall { +- c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { ++ c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { ++func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116250,7 +120665,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { ++func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116258,21 +120673,21 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { ++func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesGetCall) Header() http.Header { ++func (c *InterconnectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116285,7 +120700,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116293,20 +120708,20 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "license": c.license, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.get" call. +-// Exactly one of *License or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *License.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { ++// Do executes the "compute.interconnects.get" call. ++// Exactly one of *Interconnect or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Interconnect.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116325,7 +120740,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &License{ ++ ret := &Interconnect{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116337,17 +120752,17 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + } + return ret, nil + // { +- // "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses/{license}", ++ // "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "GET", +- // "id": "compute.licenses.get", ++ // "id": "compute.interconnects.get", + // "parameterOrder": [ + // "project", +- // "license" ++ // "interconnect" + // ], + // "parameters": { +- // "license": { +- // "description": "Name of the License resource to return.", ++ // "interconnect": { ++ // "description": "Name of the interconnect to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -116361,9 +120776,9 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{license}", ++ // "path": "projects/{project}/global/interconnects/{interconnect}", + // "response": { +- // "$ref": "License" ++ // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116374,43 +120789,34 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + + } + +-// method id "compute.licenses.getIamPolicy": ++// method id "compute.interconnects.getDiagnostics": + +-type LicensesGetIamPolicyCall struct { ++type InterconnectsGetDiagnosticsCall struct { + s *Service + project string +- resource string ++ interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. *Caution* This resource +-// is intended for use only by third-party partners who are creating +-// Cloud Marketplace images. ++// GetDiagnostics: Returns the interconnectDiagnostics for the specified ++// Interconnect. + // ++// - interconnect: Name of the interconnect resource to query. + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { +- c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { ++ c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.resource = resource +- return c +-} +- +-// OptionsRequestedPolicyVersion sets the optional parameter +-// "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { +- c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ c.interconnect = interconnect + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116420,7 +120826,7 @@ func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamP + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116428,21 +120834,21 @@ func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIam + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesGetIamPolicyCall) Header() http.Header { ++func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116455,7 +120861,7 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116463,20 +120869,21 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "resource": c.resource, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.getIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.interconnects.getDiagnostics" call. 
++// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116495,7 +120902,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &InterconnectsGetDiagnosticsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116507,39 +120914,33 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "httpMethod": "GET", +- // "id": "compute.licenses.getIamPolicy", ++ // "id": "compute.interconnects.getDiagnostics", + // "parameterOrder": [ + // "project", +- // "resource" ++ // "interconnect" + // ], + // "parameters": { +- // "optionsRequestedPolicyVersion": { +- // "description": "Requested IAM Policy version.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, +- // "project": { +- // "description": "Project ID for this request.", ++ // "interconnect": { ++ // "description": "Name of the interconnect resource to query.", + // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "response": { +- // "$ref": "Policy" ++ // "$ref": "InterconnectsGetDiagnosticsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116550,26 +120951,25 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + + } + +-// method id "compute.licenses.insert": ++// method id 
"compute.interconnects.insert": + +-type LicensesInsertCall struct { +- s *Service +- project string +- license *License +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectsInsertCall struct { ++ s *Service ++ project string ++ interconnect *Interconnect ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Create a License resource in the specified project. *Caution* +-// This resource is intended for use only by third-party partners who +-// are creating Cloud Marketplace images. ++// Insert: Creates an Interconnect in the specified project using the ++// data included in the request. + // + // - project: Project ID for this request. +-func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { +- c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { ++ c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + +@@ -116584,7 +120984,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -116592,7 +120992,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116600,21 +121000,21 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesInsertCall) Header() http.Header { ++func (c *InterconnectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116622,14 +121022,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -116642,14 +121042,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.insert" call. ++// Do executes the "compute.interconnects.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116680,10 +121080,10 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses", ++ // "description": "Creates an Interconnect in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/global/interconnects", + // "httpMethod": "POST", +- // "id": "compute.licenses.insert", ++ // "id": "compute.interconnects.insert", + // "parameterOrder": [ + // "project" + // ], +@@ -116701,27 +121101,24 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses", ++ // "path": "projects/{project}/global/interconnects", + // "request": { +- // "$ref": "License" ++ // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/devstorage.full_control", +- // "https://www.googleapis.com/auth/devstorage.read_only", +- // "https://www.googleapis.com/auth/devstorage.read_write" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.licenses.list": ++// method id "compute.interconnects.list": + +-type LicensesListCall struct { ++type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -116730,18 +121127,12 @@ type LicensesListCall struct { + header_ http.Header + } + +-// List: Retrieves the list of licenses available in the specified +-// project. This method does not get any licenses that belong to other +-// projects, including licenses attached to publicly-available images, +-// like Debian 9. If you want to get a list of publicly-available +-// licenses, use this method to make a request to the respective image +-// project, such as debian-cloud or windows-cloud. *Caution* This +-// resource is intended for use only by third-party partners who are +-// creating Cloud Marketplace images. ++// List: Retrieves the list of Interconnects available to the specified ++// project. + // + // - project: Project ID for this request. +-func (r *LicensesService) List(project string) *LicensesListCall { +- c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) List(project string) *InterconnectsListCall { ++ c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -116781,7 +121172,7 @@ func (r *LicensesService) List(project string) *LicensesListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *LicensesListCall) Filter(filter string) *LicensesListCall { ++func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -116792,7 +121183,7 @@ func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. 
(Default: `500`) +-func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { ++func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -116806,7 +121197,7 @@ func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { ++func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -116814,7 +121205,7 @@ func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { ++func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -116823,7 +121214,7 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { ++func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -116831,7 +121222,7 @@ func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *Lice + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { ++func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116841,7 +121232,7 @@ func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { ++func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116849,21 +121240,21 @@ func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { ++func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesListCall) Header() http.Header { ++func (c *InterconnectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116876,7 +121267,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116889,14 +121280,14 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.list" call. +-// Exactly one of *LicensesListResponse or error will be non-nil. Any ++// Do executes the "compute.interconnects.list" call. ++// Exactly one of *InterconnectList or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *LicensesListResponse.ServerResponse.Header or (if a response was ++// *InterconnectList.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { ++func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116915,7 +121306,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &LicensesListResponse{ ++ ret := &InterconnectList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116927,10 +121318,10 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + } + return ret, nil + // { +- // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses", ++ // "description": "Retrieves the list of Interconnects available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnects", + // "httpMethod": "GET", +- // "id": "compute.licenses.list", ++ // "id": "compute.interconnects.list", + // "parameterOrder": [ + // "project" + // ], +@@ -116971,9 +121362,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/licenses", ++ // "path": "projects/{project}/global/interconnects", + // "response": { +- // "$ref": "LicensesListResponse" ++ // "$ref": "InterconnectList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116987,7 +121378,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { ++func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -117005,37 +121396,213 @@ func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListRespon + } + } + +-// method id "compute.licenses.setIamPolicy": ++// method id "compute.interconnects.patch": + +-type LicensesSetIamPolicyCall struct { ++type InterconnectsPatchCall struct { ++ s *Service ++ project string ++ interconnect string ++ interconnect2 *Interconnect ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Patch: Updates the specified Interconnect with the data included in ++// the request. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. ++// ++// - interconnect: Name of the interconnect to update. ++// - project: Project ID for this request. ++func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { ++ c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnect = interconnect ++ c.interconnect2 = interconnect2 ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectsPatchCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "interconnect": c.interconnect, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnects.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates the specified Interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.interconnects.patch", ++ // "parameterOrder": [ ++ // "project", ++ // "interconnect" ++ // ], ++ // "parameters": { ++ // "interconnect": { ++ // "description": "Name of the interconnect to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "request": { ++ // "$ref": "Interconnect" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnects.setLabels": ++ ++type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string +- globalsetpolicyrequest *GlobalSetPolicyRequest ++ globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. *Caution* This resource is +-// intended for use only by third-party partners who are creating Cloud +-// Marketplace images. ++// SetLabels: Sets the labels on an Interconnect. To learn more about ++// labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { +- c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { ++ c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource +- c.globalsetpolicyrequest = globalsetpolicyrequest ++ c.globalsetlabelsrequest = globalsetlabelsrequest + return c + } + + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { ++func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117043,21 +121610,21 @@ func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamP + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { ++func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesSetIamPolicyCall) Header() http.Header { ++func (c *InterconnectsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117065,14 +121632,14 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117086,14 +121653,14 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.interconnects.setLabels" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117112,7 +121679,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -117124,10 +121691,10 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.licenses.setIamPolicy", ++ // "id": "compute.interconnects.setLabels", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117148,12 +121715,12 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ // "path": "projects/{project}/global/interconnects/{resource}/setLabels", + // "request": { +- // "$ref": "GlobalSetPolicyRequest" ++ // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { +- // "$ref": "Policy" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -117163,9 +121730,9 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + + } + +-// method id "compute.licenses.testIamPermissions": ++// method id "compute.interconnects.testIamPermissions": + +-type LicensesTestIamPermissionsCall struct { ++type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string +@@ -117176,13 +121743,12 @@ type LicensesTestIamPermissionsCall struct { + } + + // TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. *Caution* This resource is intended for use only +-// by third-party partners who are creating Cloud Marketplace images. ++// specified resource. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. 
+-func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { +- c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { ++ c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest +@@ -117192,7 +121758,7 @@ func (r *LicensesService) TestIamPermissions(project string, resource string, te + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { ++func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117200,21 +121766,21 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { ++func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117229,7 +121795,7 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117243,14 +121809,14 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.testIamPermissions" call. ++// Do executes the "compute.interconnects.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117281,10 +121847,10 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.licenses.testIamPermissions", ++ // "id": "compute.interconnects.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117305,7 +121871,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ // "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -117321,26 +121887,349 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + + } + +-// method id "compute.machineImages.delete": ++// method id "compute.licenseCodes.get": + +-type MachineImagesDeleteCall struct { ++type LicenseCodesGetCall struct { + s *Service + project string +- machineImage string ++ licenseCode string + urlParams_ gensupport.URLParams ++ ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Delete: Deletes the specified machine image. Deleting a machine image +-// is permanent and cannot be undone. ++// Get: Return a specified license code. License codes are mirrored ++// across all projects that have permissions to read the License Code. ++// *Caution* This resource is intended for use only by third-party ++// partners who are creating Cloud Marketplace images. ++// ++// - licenseCode: Number corresponding to the License code resource to ++// return. ++// - project: Project ID for this request. ++func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { ++ c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.licenseCode = licenseCode ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. 
Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *LicenseCodesGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "licenseCode": c.licenseCode, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.licenseCodes.get" call. ++// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *LicenseCode.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &LicenseCode{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "httpMethod": "GET", ++ // "id": "compute.licenseCodes.get", ++ // "parameterOrder": [ ++ // "project", ++ // "licenseCode" ++ // ], ++ // "parameters": { ++ // "licenseCode": { ++ // "description": "Number corresponding to the License code resource to return.", ++ // "location": "path", ++ // "pattern": "[0-9]{0,61}?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "response": { ++ // "$ref": "LicenseCode" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.licenseCodes.testIamPermissions": ++ ++type LicenseCodesTestIamPermissionsCall struct { ++ s *Service ++ project string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. *Caution* This resource is intended for use only ++// by third-party partners who are creating Cloud Marketplace images. + // +-// - machineImage: The name of the machine image to delete. + // - project: Project ID for this request. +-func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { +- c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { ++ c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineImage = machineImage ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.licenseCodes.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.licenseCodes.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.licenses.delete": ++ ++type LicensesDeleteCall struct { ++ s *Service ++ project string ++ license string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Delete: Deletes the specified license. *Caution* This resource is ++// intended for use only by third-party partners who are creating Cloud ++// Marketplace images. ++// ++// - license: Name of the license resource to delete. ++// - project: Project ID for this request. ++func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { ++ c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.license = license + return c + } + +@@ -117355,7 +122244,7 @@ func (r *MachineImagesService) Delete(project string, machineImage string) *Mach + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -117363,7 +122252,7 @@ func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDele + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117371,21 +122260,21 @@ func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesDeleteCall) Header() http.Header { ++func (c *LicensesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117395,7 +122284,7 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -117403,20 +122292,20 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "machineImage": c.machineImage, ++ "project": c.project, ++ "license": c.license, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.delete" call. ++// Do executes the "compute.licenses.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117447,17 +122336,17 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", +- // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenses/{license}", + // "httpMethod": "DELETE", +- // "id": "compute.machineImages.delete", ++ // "id": "compute.licenses.delete", + // "parameterOrder": [ + // "project", +- // "machineImage" ++ // "license" + // ], + // "parameters": { +- // "machineImage": { +- // "description": "The name of the machine image to delete.", ++ // "license": { ++ // "description": "Name of the license resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -117476,7 +122365,7 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "path": "projects/{project}/global/licenses/{license}", + // "response": { + // "$ref": "Operation" + // }, +@@ -117488,33 +122377,35 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.machineImages.get": ++// method id "compute.licenses.get": + +-type MachineImagesGetCall struct { ++type LicensesGetCall struct { + s *Service + project string +- machineImage string ++ license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified machine image. ++// Get: Returns the specified License resource. *Caution* This resource ++// is intended for use only by third-party partners who are creating ++// Cloud Marketplace images. + // +-// - machineImage: The name of the machine image. ++// - license: Name of the License resource to return. + // - project: Project ID for this request. +-func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { +- c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) Get(project string, license string) *LicensesGetCall { ++ c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineImage = machineImage ++ c.license = license + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { ++func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117524,7 +122415,7 @@ func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCal + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { ++func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -117532,21 +122423,21 @@ func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCa + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { ++func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesGetCall) Header() http.Header { ++func (c *LicensesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117559,7 +122450,7 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -117567,20 +122458,20 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "machineImage": c.machineImage, ++ "project": c.project, ++ "license": c.license, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.get" call. +-// Exactly one of *MachineImage or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *MachineImage.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { ++// Do executes the "compute.licenses.get" call. ++// Exactly one of *License or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *License.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117599,7 +122490,7 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineImage{ ++ ret := &License{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -117611,17 +122502,17 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + } + return ret, nil + // { +- // "description": "Returns the specified machine image.", +- // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "description": "Returns the specified License resource. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{license}", + // "httpMethod": "GET", +- // "id": "compute.machineImages.get", ++ // "id": "compute.licenses.get", + // "parameterOrder": [ + // "project", +- // "machineImage" ++ // "license" + // ], + // "parameters": { +- // "machineImage": { +- // "description": "The name of the machine image.", ++ // "license": { ++ // "description": "Name of the License resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -117635,9 +122526,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "path": "projects/{project}/global/licenses/{license}", + // "response": { +- // "$ref": "MachineImage" ++ // "$ref": "License" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -117648,9 +122539,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + + } + +-// method id "compute.machineImages.getIamPolicy": ++// method id "compute.licenses.getIamPolicy": + +-type MachineImagesGetIamPolicyCall struct { ++type LicensesGetIamPolicyCall struct { + s *Service + project string + resource string +@@ -117661,12 +122552,14 @@ type MachineImagesGetIamPolicyCall struct { + } + + // GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. ++// empty if no such policy or resource exists. *Caution* This resource ++// is intended for use only by third-party partners who are creating ++// Cloud Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { +- c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { ++ c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +@@ -117674,7 +122567,7 @@ func (r *MachineImagesService) GetIamPolicy(project string, resource string) *Ma + + // OptionsRequestedPolicyVersion sets the optional parameter + // "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } +@@ -117682,7 +122575,7 @@ func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsReq + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117692,7 +122585,7 @@ func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -117700,21 +122593,21 @@ func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineIm + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesGetIamPolicyCall) Header() http.Header { ++func (c *LicensesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117727,7 +122620,7 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -117741,14 +122634,14 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.getIamPolicy" call. ++// Do executes the "compute.licenses.getIamPolicy" call. + // Exactly one of *Policy or error will be non-nil. Any non-2xx status + // code is an error. Response headers are in either + // *Policy.ServerResponse.Header or (if a response was returned at all) + // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to + // check whether the returned error was because http.StatusNotModified + // was returned. +-func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117779,10 +122672,10 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", + // "httpMethod": "GET", +- // "id": "compute.machineImages.getIamPolicy", ++ // "id": "compute.licenses.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117809,7 +122702,7 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", ++ // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, +@@ -117822,28 +122715,26 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + + } + +-// method id "compute.machineImages.insert": ++// method id "compute.licenses.insert": + +-type MachineImagesInsertCall struct { +- s *Service +- project string +- machineimage *MachineImage +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type LicensesInsertCall struct { ++ s *Service ++ project string ++ license *License ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a machine image in the specified project using the +-// data that is included in the request. If you are creating a new +-// machine image to update an existing instance, your new machine image +-// should use the same network or, if applicable, the same subnetwork as +-// the original instance. ++// Insert: Create a License resource in the specified project. *Caution* ++// This resource is intended for use only by third-party partners who ++// are creating Cloud Marketplace images. + // + // - project: Project ID for this request. +-func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { +- c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { ++ c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineimage = machineimage ++ c.license = license + return c + } + +@@ -117858,23 +122749,15 @@ func (r *MachineImagesService) Insert(project string, machineimage *MachineImage + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// SourceInstance sets the optional parameter "sourceInstance": +-// Required. 
Source instance that is used to create the machine image +-// from. +-func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { +- c.urlParams_.Set("sourceInstance", sourceInstance) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117882,21 +122765,21 @@ func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesIns + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesInsertCall) Header() http.Header { ++func (c *LicensesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117904,14 +122787,14 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117924,14 +122807,14 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.insert" call. ++// Do executes the "compute.licenses.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117962,10 +122845,10 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", +- // "flatPath": "projects/{project}/global/machineImages", ++ // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses", + // "httpMethod": "POST", +- // "id": "compute.machineImages.insert", ++ // "id": "compute.licenses.insert", + // "parameterOrder": [ + // "project" + // ], +@@ -117981,31 +122864,29 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" +- // }, +- // "sourceInstance": { +- // "description": "Required. Source instance that is used to create the machine image from.", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages", ++ // "path": "projects/{project}/global/licenses", + // "request": { +- // "$ref": "MachineImage" ++ // "$ref": "License" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/devstorage.full_control", ++ // "https://www.googleapis.com/auth/devstorage.read_only", ++ // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + + } + +-// method id "compute.machineImages.list": ++// method id "compute.licenses.list": + +-type MachineImagesListCall struct { ++type LicensesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -118014,12 +122895,18 @@ type MachineImagesListCall struct { + header_ http.Header + } + +-// List: Retrieves a list of machine images that are contained within +-// the specified project. ++// List: Retrieves the list of licenses available in the specified ++// project. This method does not get any licenses that belong to other ++// projects, including licenses attached to publicly-available images, ++// like Debian 9. If you want to get a list of publicly-available ++// licenses, use this method to make a request to the respective image ++// project, such as debian-cloud or windows-cloud. 
*Caution* This ++// resource is intended for use only by third-party partners who are ++// creating Cloud Marketplace images. + // + // - project: Project ID for this request. +-func (r *MachineImagesService) List(project string) *MachineImagesListCall { +- c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) List(project string) *LicensesListCall { ++ c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -118059,7 +122946,7 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { ++func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -118070,7 +122957,7 @@ func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { ++func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -118084,7 +122971,7 @@ func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListC + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { ++func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -118092,7 +122979,7 @@ func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { ++func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -118101,7 +122988,7 @@ func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCa + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { ++func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -118109,7 +122996,7 @@ func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { ++func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118119,7 +123006,7 @@ func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListC + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { ++func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118127,21 +123014,21 @@ func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesList + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { ++func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesListCall) Header() http.Header { ++func (c *LicensesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118154,7 +123041,7 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118167,14 +123054,14 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.list" call. +-// Exactly one of *MachineImageList or error will be non-nil. Any ++// Do executes the "compute.licenses.list" call. ++// Exactly one of *LicensesListResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *MachineImageList.ServerResponse.Header or (if a response was ++// *LicensesListResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { ++func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118193,7 +123080,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineImageList{ ++ ret := &LicensesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -118205,10 +123092,10 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + } + return ret, nil + // { +- // "description": "Retrieves a list of machine images that are contained within the specified project.", +- // "flatPath": "projects/{project}/global/machineImages", ++ // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses", + // "httpMethod": "GET", +- // "id": "compute.machineImages.list", ++ // "id": "compute.licenses.list", + // "parameterOrder": [ + // "project" + // ], +@@ -118249,9 +123136,9 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/machineImages", ++ // "path": "projects/{project}/global/licenses", + // "response": { +- // "$ref": "MachineImageList" ++ // "$ref": "LicensesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -118265,7 +123152,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { ++func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -118283,9 +123170,9 @@ func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageL + } + } + +-// method id "compute.machineImages.setIamPolicy": ++// method id "compute.licenses.setIamPolicy": + +-type MachineImagesSetIamPolicyCall struct { ++type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string +@@ -118296,12 +123183,14 @@ type MachineImagesSetIamPolicyCall struct { + } + + // SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. ++// resource. Replaces any existing policy. *Caution* This resource is ++// intended for use only by third-party partners who are creating Cloud ++// Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. 
+-func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { +- c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { ++ c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest +@@ -118311,7 +123200,7 @@ func (r *MachineImagesService) SetIamPolicy(project string, resource string, glo + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { ++func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118319,21 +123208,21 @@ func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { ++func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesSetIamPolicyCall) Header() http.Header { ++func (c *LicensesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118348,7 +123237,7 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -118362,14 +123251,14 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.setIamPolicy" call. ++// Do executes the "compute.licenses.setIamPolicy" call. + // Exactly one of *Policy or error will be non-nil. Any non-2xx status + // code is an error. Response headers are in either + // *Policy.ServerResponse.Header or (if a response was returned at all) + // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to + // check whether the returned error was because http.StatusNotModified + // was returned. 
+-func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118400,10 +123289,10 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.machineImages.setIamPolicy", ++ // "id": "compute.licenses.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" +@@ -118424,7 +123313,7 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, +@@ -118439,9 +123328,9 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + + } + +-// method id "compute.machineImages.testIamPermissions": ++// method id "compute.licenses.testIamPermissions": + +-type MachineImagesTestIamPermissionsCall struct { ++type LicensesTestIamPermissionsCall struct { + s *Service + project string + resource string +@@ -118452,12 +123341,13 @@ type MachineImagesTestIamPermissionsCall struct { + } + + // TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// specified resource. *Caution* This resource is intended for use only ++// by third-party partners who are creating Cloud Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { +- c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { ++ c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest +@@ -118467,7 +123357,7 @@ func (r *MachineImagesService) TestIamPermissions(project string, resource strin + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { ++func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118475,21 +123365,21 @@ func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Mach + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { ++func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { ++func (c *LicensesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118504,7 +123394,7 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -118518,14 +123408,14 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.testIamPermissions" call. ++// Do executes the "compute.licenses.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118556,10 +123446,10 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.machineImages.testIamPermissions", ++ // "id": "compute.licenses.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" +@@ -118580,7 +123470,7 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -118596,125 +123486,200 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.machineTypes.aggregatedList": ++// method id "compute.machineImages.delete": + +-type MachineTypesAggregatedListCall struct { ++type MachineImagesDeleteCall struct { + s *Service + project string ++ machineImage string + urlParams_ gensupport.URLParams +- ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// AggregatedList: Retrieves an aggregated list of machine types. ++// Delete: Deletes the specified machine image. Deleting a machine image ++// is permanent and cannot be undone. + // ++// - machineImage: The name of the machine image to delete. + // - project: Project ID for this request. +-func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { +- c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { ++ c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.machineImage = machineImage + return c + } + +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. 
For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("filter", filter) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. +-func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { ++ c.ctx_ = ctx + return c + } + +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("orderBy", orderBy) +- return c ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *MachineImagesDeleteCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("pageToken", pageToken) +- return c ++func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("DELETE", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "machineImage": c.machineImage, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. 
+-func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// Do executes the "compute.machineImages.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", ++ // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.machineImages.delete", ++ // "parameterOrder": [ ++ // "project", ++ // "machineImage" ++ // ], ++ // "parameters": { ++ // "machineImage": { ++ // "description": "The name of the machine image to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.machineImages.get": ++ ++type MachineImagesGetCall struct { ++ s *Service ++ project string ++ machineImage string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the specified machine image. ++// ++// - machineImage: The name of the machine image. ++// - project: Project ID for this request. ++func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { ++ c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.machineImage = machineImage + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118724,7 +123689,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118732,21 +123697,21 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
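// Editorial aside, not part of the patch: a minimal usage sketch for the
// generated compute.machineImages.delete method added in this hunk. It assumes
// the vendored import path google.golang.org/api/compute/v1 and that the
// generated *Service exposes a MachineImages field, as in stock releases of
// google.golang.org/api; the project, image name and UUID below are placeholders.
package machineimagesdemo

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func deleteMachineImage(ctx context.Context, project, image string) error {
	svc, err := compute.NewService(ctx) // credentials resolved from the environment
	if err != nil {
		return err
	}
	// Per the requestId doc comment above, reusing the same non-zero UUID on a
	// retry lets the server recognize and ignore the duplicate request.
	op, err := svc.MachineImages.Delete(project, image).
		RequestId("11111111-2222-4333-8444-555555555555"). // example UUID only
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete started, operation %q", op.Name)
	return nil
}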
+-func (c *MachineTypesAggregatedListCall) Header() http.Header { ++func (c *MachineImagesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118759,7 +123724,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118767,19 +123732,20 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, ++ "project": c.project, ++ "machineImage": c.machineImage, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.aggregatedList" call. +-// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { ++// Do executes the "compute.machineImages.get" call. ++// Exactly one of *MachineImage or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *MachineImage.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118798,7 +123764,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineTypeAggregatedList{ ++ ret := &MachineImage{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -118810,40 +123776,20 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + } + return ret, nil + // { +- // "description": "Retrieves an aggregated list of machine types.", +- // "flatPath": "projects/{project}/aggregated/machineTypes", ++ // "description": "Returns the specified machine image.", ++ // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "httpMethod": "GET", +- // "id": "compute.machineTypes.aggregatedList", ++ // "id": "compute.machineImages.get", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "machineImage" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", ++ // "machineImage": { ++ // "description": "The name of the machine image.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" + // }, + // "project": { +@@ -118852,16 +123798,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" +- // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", +- // "location": "query", +- // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/machineTypes", ++ // "path": "projects/{project}/global/machineImages/{machineImage}", + // "response": { +- // "$ref": "MachineTypeAggregatedList" ++ // "$ref": "MachineImage" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -118872,57 +123813,41 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. 
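// Editorial aside, not part of the patch: a sketch of the
// compute.machineImages.get call defined above, including a conditional
// request via IfNoneMatch. googleapi.IsNotModified is the helper the generated
// Do methods themselves reference; the ETag value is a placeholder.
package machineimagesdemo

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func getMachineImage(ctx context.Context, svc *compute.Service, project, image, lastETag string) (*compute.MachineImage, error) {
	call := svc.MachineImages.Get(project, image).Context(ctx)
	if lastETag != "" {
		// Ask the server to return 304 Not Modified if the resource is unchanged.
		call = call.IfNoneMatch(lastETag)
	}
	mi, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Cached copy is still current; not an error for our purposes.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("machineImages.get: %w", err)
	}
	return mi, nil
}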
+-func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.machineTypes.get": ++// method id "compute.machineImages.getIamPolicy": + +-type MachineTypesGetCall struct { ++type MachineImagesGetIamPolicyCall struct { + s *Service + project string +- zone string +- machineType string ++ resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified machine type. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // +-// - machineType: Name of the machine type to return. + // - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { +- c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { ++ c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.machineType = machineType ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118932,7 +123857,7 @@ func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118940,21 +123865,21 @@ func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineTypesGetCall) Header() http.Header { ++func (c *MachineImagesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118967,7 +123892,7 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118975,21 +123900,20 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "machineType": c.machineType, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.get" call. +-// Exactly one of *MachineType or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *MachineType.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { ++// Do executes the "compute.machineImages.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119008,7 +123932,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineType{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119020,22 +123944,20 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + } + return ret, nil + // { +- // "description": "Returns the specified machine type.", +- // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "httpMethod": "GET", +- // "id": "compute.machineTypes.get", ++ // "id": "compute.machineImages.getIamPolicy", + // "parameterOrder": [ + // "project", +- // "zone", +- // "machineType" ++ // "resource" + // ], + // "parameters": { +- // "machineType": { +- // "description": "Name of the machine type to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", +@@ -119044,17 +123966,17 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone for this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "response": { +- // "$ref": "MachineType" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119065,182 +123987,116 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + + } + +-// method id "compute.machineTypes.list": ++// method id "compute.machineImages.insert": + +-type MachineTypesListCall struct { ++type MachineImagesInsertCall struct { + s *Service + project string +- zone string ++ machineimage *MachineImage + urlParams_ gensupport.URLParams +- ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves a list of machine types available to the specified +-// project. ++// Insert: Creates a machine image in the specified project using the ++// data that is included in the request. If you are creating a new ++// machine image to update an existing instance, your new machine image ++// should use the same network or, if applicable, the same subnetwork as ++// the original instance. + // + // - project: Project ID for this request. +-// - zone: The name of the zone for this request. 
+-func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { +- c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { ++ c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- return c +-} +- +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. 
(Default: `500`) +-func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +- return c +-} +- +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { +- c.urlParams_.Set("orderBy", orderBy) ++ c.machineimage = machineimage + return c + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { +- c.urlParams_.Set("pageToken", pageToken) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// SourceInstance sets the optional parameter "sourceInstance": ++// Required. Source instance that is used to create the machine image ++// from. ++func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { ++ c.urlParams_.Set("sourceInstance", sourceInstance) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { ++func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. 
This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { ++func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineTypesListCall) Header() http.Header { ++func (c *MachineImagesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.list" call. +-// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx ++// Do executes the "compute.machineImages.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *MachineTypeList.ServerResponse.Header or (if a response was returned +-// at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119259,7 +124115,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineTypeList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119271,38 +124127,14 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + } + return ret, nil + // { +- // "description": "Retrieves a list of machine types available to the specified project.", +- // "flatPath": "projects/{project}/zones/{zone}/machineTypes", +- // "httpMethod": "GET", +- // "id": "compute.machineTypes.list", ++ // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", ++ // "flatPath": "projects/{project}/global/machineImages", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.insert", + // "parameterOrder": [ +- // "project", +- // "zone" ++ // "project" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119310,56 +124142,35 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, ++ // "sourceInstance": { ++ // "description": "Required. 
Source instance that is used to create the machine image from.", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/machineTypes", ++ // "path": "projects/{project}/global/machineImages", ++ // "request": { ++ // "$ref": "MachineImage" ++ // }, + // "response": { +- // "$ref": "MachineTypeList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.networkAttachments.aggregatedList": ++// method id "compute.machineImages.list": + +-type NetworkAttachmentsAggregatedListCall struct { ++type MachineImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -119368,12 +124179,12 @@ type NetworkAttachmentsAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves the list of all NetworkAttachment +-// resources, regional and global, available to the specified project. ++// List: Retrieves a list of machine images that are contained within ++// the specified project. + // + // - project: Project ID for this request. +-func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { +- c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) List(project string) *MachineImagesListCall { ++ c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -119413,31 +124224,18 @@ func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttac + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { + c.urlParams_.Set("filter", filter) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. 
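// Editorial aside, not part of the patch: a sketch of
// compute.machineImages.insert as generated above. The sourceInstance value is
// an illustrative zonal-instance path, and the MachineImage body only names the
// image; treat both as placeholders rather than a prescribed format.
package machineimagesdemo

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func createMachineImage(ctx context.Context, svc *compute.Service, project, name string) (*compute.Operation, error) {
	// SourceInstance is the required query parameter documented above; the body
	// only needs to name the machine image being created.
	return svc.MachineImages.Insert(project, &compute.MachineImage{Name: name}).
		SourceInstance("projects/" + project + "/zones/us-central1-a/instances/example-instance"). // placeholder
		Context(ctx).
		Do()
}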
+-func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +- return c +-} +- + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -119451,7 +124249,7 @@ func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *Net + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -119459,7 +124257,7 @@ func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkA + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -119468,7 +124266,7 @@ func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *Netw + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -119476,7 +124274,7 @@ func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartia + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -119486,7 +124284,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *Net + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -119494,21 +124292,21 @@ func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *Ne + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { ++func (c *MachineImagesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -119521,7 +124319,7 @@ func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Resp + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -119534,14 +124332,14 @@ func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Resp + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.aggregatedList" call. +-// Exactly one of *NetworkAttachmentAggregatedList or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if +-// a response was returned at all) in error.(*googleapi.Error).Header. +-// Use googleapi.IsNotModified to check whether the returned error was ++// Do executes the "compute.machineImages.list" call. ++// Exactly one of *MachineImageList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *MachineImageList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { ++func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119560,7 +124358,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachmentAggregatedList{ ++ ret := &MachineImageList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119572,10 +124370,10 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", +- // "flatPath": "projects/{project}/aggregated/networkAttachments", ++ // "description": "Retrieves a list of machine images that are contained within the specified project.", ++ // "flatPath": "projects/{project}/global/machineImages", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.aggregatedList", ++ // "id": "compute.machineImages.list", + // "parameterOrder": [ + // "project" + // ], +@@ -119585,11 +124383,6 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -119621,9 +124414,9 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkAttachments", ++ // "path": "projects/{project}/global/machineImages", + // "response": { +- // "$ref": "NetworkAttachmentAggregatedList" ++ // "$ref": "MachineImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119637,7 +124430,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { ++func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -119655,53 +124448,35 @@ func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func + } + } + +-// method id "compute.networkAttachments.delete": ++// method id "compute.machineImages.setIamPolicy": + +-type NetworkAttachmentsDeleteCall struct { +- s *Service +- project string +- region string +- networkAttachment string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type MachineImagesSetIamPolicyCall struct { ++ s *Service ++ project string ++ resource string ++ globalsetpolicyrequest *GlobalSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified NetworkAttachment in the given scope ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - networkAttachment: Name of the NetworkAttachment resource to +-// delete. +-// - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { +- c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { ++ c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkAttachment = networkAttachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +-// MixerMutationRequestBuilder +-func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { +- c.urlParams_.Set("requestId", requestId) ++ c.resource = resource ++ c.globalsetpolicyrequest = globalsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { ++func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -119709,21 +124484,21 @@ func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAtta + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { ++func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsDeleteCall) Header() http.Header { ++func (c *MachineImagesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -119731,31 +124506,35 @@ func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, er + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkAttachment": c.networkAttachment, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.delete" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.machineImages.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119774,7 +124553,7 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119786,23 +124565,15 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + } + return ret, nil + // { +- // "description": "Deletes the specified NetworkAttachment in the given scope", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", +- // "httpMethod": "DELETE", +- // "id": "compute.networkAttachments.delete", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkAttachment" ++ // "resource" + // ], + // "parameters": { +- // "networkAttachment": { +- // "description": "Name of the NetworkAttachment resource to delete.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119810,22 +124581,20 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "GlobalSetPolicyRequest" ++ // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119835,105 +124604,93 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + + } + +-// method id "compute.networkAttachments.get": ++// method id "compute.machineImages.testIamPermissions": + +-type NetworkAttachmentsGetCall struct { +- s *Service +- project string +- region string +- networkAttachment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type MachineImagesTestIamPermissionsCall struct { ++ s *Service ++ project string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified NetworkAttachment resource in the given +-// scope. ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. + // +-// - networkAttachment: Name of the NetworkAttachment resource to +-// return. +-// - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { +- c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { ++ c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkAttachment = networkAttachment ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { ++func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { ++func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsGetCall) Header() http.Header { ++func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkAttachment": c.networkAttachment, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.get" call. +-// Exactly one of *NetworkAttachment or error will be non-nil. Any ++// Do executes the "compute.machineImages.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *NetworkAttachment.ServerResponse.Header or (if a response was ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { ++func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119952,7 +124709,7 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachment{ ++ ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119964,23 +124721,15 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + } + return ret, nil + // { +- // "description": "Returns the specified NetworkAttachment resource in the given scope.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", +- // "httpMethod": "GET", +- // "id": "compute.networkAttachments.get", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkAttachment" ++ // "resource" + // ], + // "parameters": { +- // "networkAttachment": { +- // "description": "Name of the NetworkAttachment resource to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119988,17 +124737,20 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, + // "response": { +- // "$ref": "NetworkAttachment" ++ // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120009,46 +124761,127 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + + } + +-// method id "compute.networkAttachments.getIamPolicy": ++// method id "compute.machineTypes.aggregatedList": + +-type NetworkAttachmentsGetIamPolicyCall struct { ++type MachineTypesAggregatedListCall struct { + s *Service + project string +- region string +- resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. ++// AggregatedList: Retrieves an aggregated list of machine types. + // + // - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. 
+-func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { +- c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { ++ c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.resource = resource + return c + } + +-// OptionsRequestedPolicyVersion sets the optional parameter +-// "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { +- c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("filter", filter) + return c + } + +-// Fields allows partial responses to be retrieved. 
See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c + } + + // IfNoneMatch sets the optional parameter which makes the operation +@@ -120056,7 +124889,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *Netwo + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { ++func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -120064,21 +124897,21 @@ func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *Netw + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { ++func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { ++func (c *MachineTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -120091,7 +124924,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Respon + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -120099,21 +124932,19 @@ func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.getIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.machineTypes.aggregatedList" call. 
++// Exactly one of *MachineTypeAggregatedList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *MachineTypeAggregatedList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120132,7 +124963,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &MachineTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120144,47 +124975,58 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "description": "Retrieves an aggregated list of machine types.", ++ // "flatPath": "projects/{project}/aggregated/machineTypes", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.getIamPolicy", ++ // "id": "compute.machineTypes.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" ++ // "project" + // ], + // "parameters": { +- // "optionsRequestedPolicyVersion": { +- // "description": "Requested IAM Policy version.", +- // "format": "int32", ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", + // "location": "query", ++ // "minimum": "0", + // "type": "integer" + // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", + // "type": "string" + // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "path": "projects/{project}/aggregated/machineTypes", + // "response": { +- // "$ref": "Policy" ++ // "$ref": "MachineTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120195,110 +125037,124 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkAttachments.insert": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type NetworkAttachmentsInsertCall struct { +- s *Service +- project string +- region string +- networkattachment *NetworkAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.machineTypes.get": ++ ++type MachineTypesGetCall struct { ++ s *Service ++ project string ++ zone string ++ machineType string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a NetworkAttachment in the specified project in the +-// given scope using the parameters that are included in the request. ++// Get: Returns the specified machine type. + // ++// - machineType: Name of the machine type to return. + // - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { +- c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { ++ c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkattachment = networkattachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +-// MixerMutationRequestBuilder +-func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { +- c.urlParams_.Set("requestId", requestId) ++ c.zone = zone ++ c.machineType = machineType + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { ++func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { ++func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsInsertCall) Header() http.Header { ++func (c *MachineTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "zone": c.zone, ++ "machineType": c.machineType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.insert" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// Do executes the "compute.machineTypes.get" call. ++// Exactly one of *MachineType or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at ++// *MachineType.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120317,7 +125173,7 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &MachineType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120329,15 +125185,23 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + } + return ret, nil + // { +- // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.insert", ++ // "description": "Returns the specified machine type.", ++ // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "httpMethod": "GET", ++ // "id": "compute.machineTypes.get", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone", ++ // "machineType" + // ], + // "parameters": { ++ // "machineType": { ++ // "description": "Name of the machine type to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -120345,54 +125209,48 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments", +- // "request": { +- // "$ref": "NetworkAttachment" +- // }, ++ // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "MachineType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkAttachments.list": ++// method id "compute.machineTypes.list": + +-type NetworkAttachmentsListCall struct { ++type MachineTypesListCall struct { + s *Service + project string +- region string ++ zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Lists the NetworkAttachments for a project in the given scope. ++// List: Retrieves a list of machine types available to the specified ++// project. + // + // - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { +- c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { ++ c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + return c + } + +@@ -120431,7 +125289,7 @@ func (r *NetworkAttachmentsService) List(project string, region string) *Network + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -120442,7 +125300,7 @@ func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsLi + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -120456,7 +125314,7 @@ func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttach + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. 
+-func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -120464,7 +125322,7 @@ func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachments + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -120473,7 +125331,7 @@ func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachm + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -120481,7 +125339,7 @@ func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess b + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -120491,7 +125349,7 @@ func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttach + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -120499,21 +125357,21 @@ func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttac + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsListCall) Header() http.Header { ++func (c *MachineTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -120526,7 +125384,7 @@ func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, erro + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -120535,19 +125393,19 @@ func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, erro + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.list" call. +-// Exactly one of *NetworkAttachmentList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *NetworkAttachmentList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.machineTypes.list" call. ++// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *MachineTypeList.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { ++func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120566,7 +125424,7 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachmentList{ ++ ret := &MachineTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120578,13 +125436,13 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + } + return ret, nil + // { +- // "description": "Lists the NetworkAttachments for a project in the given scope.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments", ++ // "description": "Retrieves a list of machine types available to the specified project.", ++ // "flatPath": "projects/{project}/zones/{zone}/machineTypes", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.list", ++ // "id": "compute.machineTypes.list", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "filter": { +@@ -120617,22 +125475,22 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments", ++ // "path": "projects/{project}/zones/{zone}/machineTypes", + // "response": { +- // "$ref": "NetworkAttachmentList" ++ // "$ref": "MachineTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120646,7 +125504,7 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { ++func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -120664,346 +125522,9 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA + } + } + +-// method id "compute.networkAttachments.setIamPolicy": +- +-type NetworkAttachmentsSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. +-// +-// - project: Project ID for this request. 
+-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { +- c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.networkAttachments.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Policy{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.setIamPolicy", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "request": { +- // "$ref": "RegionSetPolicyRequest" +- // }, +- // "response": { +- // "$ref": "Policy" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.networkAttachments.testIamPermissions": +- +-type NetworkAttachmentsTestIamPermissionsCall struct { +- s *Service +- project string +- region string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. +-// +-// - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { +- c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. 
+-func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.networkAttachments.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &TestPermissionsResponse{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.testIamPermissions", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, +- // "response": { +- // "$ref": "TestPermissionsResponse" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.networkEdgeSecurityServices.aggregatedList": ++// method id "compute.networkAttachments.aggregatedList": + +-type NetworkEdgeSecurityServicesAggregatedListCall struct { ++type NetworkAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -121012,12 +125533,12 @@ type NetworkEdgeSecurityServicesAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService +-// resources available to the specified project. ++// AggregatedList: Retrieves the list of all NetworkAttachment ++// resources, regional and global, available to the specified project. + // +-// - project: Name of the project scoping this request. +-func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { +- c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. 
++func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { ++ c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -121057,7 +125578,7 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -121070,7 +125591,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *N + // response. For resource types which predate this field, if this flag + // is omitted or false, only scopes of the scope types where the + // resource type is expected to be found will be included. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c + } +@@ -121081,7 +125602,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(include + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -121095,7 +125616,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults in + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -121103,7 +125624,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -121112,7 +125633,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken stri + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. 
The default value is + // false. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -121120,7 +125641,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(ret + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121130,7 +125651,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Fi + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -121138,21 +125659,21 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag st + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { ++func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121165,7 +125686,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -121178,16 +125699,14 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. +-// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error +-// will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { ++// Do executes the "compute.networkAttachments.aggregatedList" call. ++// Exactly one of *NetworkAttachmentAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if ++// a response was returned at all) in error.(*googleapi.Error).Header. ++// Use googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121206,7 +125725,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEdgeSecurityServiceAggregatedList{ ++ ret := &NetworkAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121218,10 +125737,10 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", +- // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", ++ // "flatPath": "projects/{project}/aggregated/networkAttachments", + // "httpMethod": "GET", +- // "id": "compute.networkEdgeSecurityServices.aggregatedList", ++ // "id": "compute.networkAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -121255,7 +125774,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // "type": "string" + // }, + // "project": { +- // "description": "Name of the project scoping this request.", ++ // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, +@@ -121267,9 +125786,9 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "path": 
"projects/{project}/aggregated/networkAttachments", + // "response": { +- // "$ref": "NetworkEdgeSecurityServiceAggregatedList" ++ // "$ref": "NetworkAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -121283,7 +125802,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { ++func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -121301,29 +125820,29 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Contex + } + } + +-// method id "compute.networkEdgeSecurityServices.delete": ++// method id "compute.networkAttachments.delete": + +-type NetworkEdgeSecurityServicesDeleteCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified service. ++// Delete: Deletes the specified NetworkAttachment in the given scope + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to delete. ++// - networkAttachment: Name of the NetworkAttachment resource to ++// delete. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { +- c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { ++ c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkAttachment = networkAttachment + return c + } + +@@ -121337,8 +125856,9 @@ func (r *NetworkEdgeSecurityServicesService) Delete(project string, region strin + // received, and if so, will ignore the second request. This prevents + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { ++// supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -121346,7 +125866,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *Net + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { ++func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121354,21 +125874,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *Ne + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { ++func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { ++func (c *NetworkAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121378,7 +125898,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -121386,21 +125906,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.delete" call. ++// Do executes the "compute.networkAttachments.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121431,18 +125951,18 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Deletes the specified service.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "description": "Deletes the specified NetworkAttachment in the given scope", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "DELETE", +- // "id": "compute.networkEdgeSecurityServices.delete", ++ // "id": "compute.networkAttachments.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "networkEdgeSecurityService" ++ // "networkAttachment" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to delete.", ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -121456,19 +125976,19 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "response": { + // "$ref": "Operation" + // }, +@@ -121480,37 +126000,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.networkEdgeSecurityServices.get": ++// method id "compute.networkAttachments.get": + +-type NetworkEdgeSecurityServicesGetCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Gets a specified NetworkEdgeSecurityService. ++// Get: Returns the specified NetworkAttachment resource in the given ++// scope. + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to get. ++// - networkAttachment: Name of the NetworkAttachment resource to ++// return. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { +- c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { ++ c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkAttachment = networkAttachment + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121520,7 +126041,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *Netwo + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -121528,21 +126049,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *Netw + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { ++func (c *NetworkAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121555,7 +126076,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -121563,21 +126084,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.get" call. +-// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.networkAttachments.get" call. ++// Exactly one of *NetworkAttachment or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkAttachment.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { ++func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121596,7 +126117,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEdgeSecurityService{ ++ ret := &NetworkAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121608,18 +126129,18 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Gets a specified NetworkEdgeSecurityService.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "description": "Returns the specified NetworkAttachment resource in the given scope.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "GET", +- // "id": "compute.networkEdgeSecurityServices.get", ++ // "id": "compute.networkAttachments.get", + // "parameterOrder": [ + // "project", + // "region", +- // "networkEdgeSecurityService" ++ // "networkAttachment" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to get.", ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -121633,16 +126154,16 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "response": { +- // "$ref": "NetworkEdgeSecurityService" ++ // "$ref": "NetworkAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -121653,116 +126174,111 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkEdgeSecurityServices.insert": ++// method id "compute.networkAttachments.getIamPolicy": + +-type NetworkEdgeSecurityServicesInsertCall struct { +- s *Service +- project string +- region string +- networkedgesecurityservice *NetworkEdgeSecurityService +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsGetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a new service in the specified project using the data +-// included in the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. 
+-func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { +- c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { ++ c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkedgesecurityservice = networkedgesecurityservice +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { +- c.urlParams_.Set("requestId", requestId) ++ c.resource = resource + return c + } + +-// ValidateOnly sets the optional parameter "validateOnly": If true, the +-// request will not be committed. +-func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { +- c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { ++func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. 
Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { ++func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.insert" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. 
++func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121781,7 +126297,7 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121793,15 +126309,22 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Creates a new service in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", +- // "httpMethod": "POST", +- // "id": "compute.networkEdgeSecurityServices.insert", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.networkAttachments.getIamPolicy", + // "parameterOrder": [ + // "project", +- // "region" ++ // "region", ++ // "resource" + // ], + // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -121810,70 +126333,55 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" +- // }, +- // "validateOnly": { +- // "description": "If true, the request will not be committed.", +- // "location": "query", +- // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", +- // "request": { +- // "$ref": "NetworkEdgeSecurityService" +- // }, ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEdgeSecurityServices.patch": ++// method id "compute.networkAttachments.insert": + +-type NetworkEdgeSecurityServicesPatchCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- networkedgesecurityservice *NetworkEdgeSecurityService +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ networkattachment *NetworkAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Patches the specified policy with the data included in the +-// request. ++// Insert: Creates a NetworkAttachment in the specified project in the ++// given scope using the parameters that are included in the request. + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to update. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { +- c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { ++ c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService +- c.networkedgesecurityservice = networkedgesecurityservice +- return c +-} +- +-// Paths sets the optional parameter "paths": +-func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { +- c.urlParams_.SetMulti("paths", append([]string{}, paths...)) ++ c.networkattachment = networkattachment + return c + } + +@@ -121887,23 +126395,17 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEd + // received, and if so, will ignore the second request. This prevents + // clients from accidentally creating duplicate commitments. 
The request + // ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { ++// supported ( 00000000-0000-0000-0000-000000000000). end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// UpdateMask sets the optional parameter "updateMask": Indicates fields +-// to be updated as part of this request. +-func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { +- c.urlParams_.Set("updateMask", updateMask) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { ++func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121911,21 +126413,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *Net + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { ++func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { ++func (c *NetworkAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121933,36 +126435,35 @@ func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.patch" call. ++// Do executes the "compute.networkAttachments.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121993,28 +126494,15 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Patches the specified policy with the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", +- // "httpMethod": "PATCH", +- // "id": "compute.networkEdgeSecurityServices.patch", ++ // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.insert", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkEdgeSecurityService" ++ // "region" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to update.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "paths": { +- // "location": "query", +- // "repeated": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122023,27 +126511,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", +- // "type": "string" +- // }, +- // "updateMask": { +- // "description": "Indicates fields to be updated as part of this request.", +- // "format": "google-fieldmask", ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments", + // "request": { +- // "$ref": "NetworkEdgeSecurityService" ++ // "$ref": "NetworkAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -122056,24 +126538,26 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.networkEndpointGroups.aggregatedList": ++// method id "compute.networkAttachments.list": + +-type NetworkEndpointGroupsAggregatedListCall struct { ++type NetworkAttachmentsListCall struct { + s *Service + project string ++ region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// AggregatedList: Retrieves the list of network endpoint groups and +-// sorts them by zone. ++// List: Lists the NetworkAttachments for a project in the given scope. + // + // - project: Project ID for this request. +-func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { +- c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { ++ c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + return c + } + +@@ -122112,31 +126596,18 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. 
+-func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. +-func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +- return c +-} +- + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -122150,7 +126621,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) * + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -122158,7 +126629,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *Netwo + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -122167,7 +126638,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. 
+-func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -122175,7 +126646,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPar + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122185,7 +126656,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) * + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -122193,21 +126664,21 @@ func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { ++func (c *NetworkAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122220,7 +126691,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -122229,19 +126700,19 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +-// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { ++// Do executes the "compute.networkAttachments.list" call. ++// Exactly one of *NetworkAttachmentList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkAttachmentList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122260,7 +126731,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupAggregatedList{ ++ ret := &NetworkAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122272,12 +126743,13 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + } + return ret, nil + // { +- // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", +- // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", ++ // "description": "Lists the NetworkAttachments for a project in the given scope.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.aggregatedList", ++ // "id": "compute.networkAttachments.list", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -122285,11 +126757,6 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // "location": "query", + // "type": "string" + // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -122315,15 +126782,22 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region of this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkAttachments", + // "response": { +- // "$ref": "NetworkEndpointGroupAggregatedList" ++ // "$ref": "NetworkAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -122337,7 +126811,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { ++func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -122355,57 +126829,38 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f f + } + } + +-// method id "compute.networkEndpointGroups.attachNetworkEndpoints": ++// method id "compute.networkAttachments.setIamPolicy": + +-type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AttachNetworkEndpoints: Attach a list of network endpoints to the +-// specified network endpoint group. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - networkEndpointGroup: The name of the network endpoint group where +-// you are attaching network endpoints to. It should comply with +-// RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. 
It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +- c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { ++ c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +- c.urlParams_.Set("requestId", requestId) ++ c.region = region ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122413,21 +126868,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122435,14 +126890,14 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -122450,21 +126905,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122483,7 +126938,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122495,22 +126950,16 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Attach a list of network endpoints to the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", ++ // "id": "compute.networkAttachments.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "resource" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122518,24 +126967,27 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "request": { +- // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" ++ // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -122545,56 +126997,38 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + + } + +-// method id "compute.networkEndpointGroups.delete": ++// method id "compute.networkAttachments.testIamPermissions": + +-type NetworkEndpointGroupsDeleteCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified network endpoint group. The network +-// endpoints in the NEG and the VM instances they belong to are not +-// terminated when the NEG is deleted. Note that the NEG cannot be +-// deleted if there are backend services referencing it. ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. + // +-// - networkEndpointGroup: The name of the network endpoint group to +-// delete. It should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { +- c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { ++ c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. 
The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { +- c.urlParams_.Set("requestId", requestId) ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122602,21 +127036,21 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122624,31 +127058,36 @@ func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.delete" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. 
Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122667,7 +127106,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122679,22 +127118,16 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + } + return ret, nil + // { +- // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", +- // "httpMethod": "DELETE", +- // "id": "compute.networkEndpointGroups.delete", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "resource" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122702,139 +127135,224 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.detachNetworkEndpoints": ++// method id "compute.networkEdgeSecurityServices.aggregatedList": + +-type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// DetachNetworkEndpoints: Detach a list of network endpoints from the +-// specified network endpoint group. ++// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService ++// resources available to the specified project. + // +-// - networkEndpointGroup: The name of the network endpoint group where +-// you are removing network endpoints. It should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +- c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Name of the project scoping this request. ++func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c + } + +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +- c.urlParams_.Set("requestId", requestId) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. 
++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. 
Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. ++// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error ++// will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122853,7 +127371,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &NetworkEdgeSecurityServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122865,155 +127383,189 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Detach a list of network endpoints from the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", +- // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", ++ // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", ++ // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "project" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", + // "type": "string" + // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "project": { ++ // "description": "Name of the project scoping this request.", + // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", +- // "request": { +- // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" +- // }, ++ // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.get": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type NetworkEndpointGroupsGetCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++// method id "compute.networkEdgeSecurityServices.delete": ++ ++type NetworkEdgeSecurityServicesDeleteCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified network endpoint group. ++// Delete: Deletes the specified service. + // +-// - networkEndpointGroup: The name of the network endpoint group. It +-// should comply with RFC1035. ++// - networkEdgeSecurityService: Name of the network edge security ++// service to delete. + // - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { +- c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { ++ c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsGetCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.get" call. +-// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. 
+-func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { ++// Do executes the "compute.networkEdgeSecurityServices.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123032,7 +127584,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroup{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123044,19 +127596,20 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + } + return ret, nil + // { +- // "description": "Returns the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", +- // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.get", ++ // "description": "Deletes the specified service.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.networkEdgeSecurityServices.delete", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "networkEdgeSecurityService" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to delete.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -123067,49 +127620,226 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { +- // "$ref": "NetworkEndpointGroup" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.insert": ++// method id "compute.networkEdgeSecurityServices.get": + +-type NetworkEndpointGroupsInsertCall struct { +- s *Service +- project string +- zone string +- networkendpointgroup *NetworkEndpointGroup +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesGetCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a network endpoint group in the specified project +-// using the parameters that are included in the request. ++// Get: Gets a specified NetworkEdgeSecurityService. + // ++// - networkEdgeSecurityService: Name of the network edge security ++// service to get. + // - project: Project ID for this request. +-// - zone: The name of the zone where you want to create the network +-// endpoint group. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { +- c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.zone = zone +- c.networkendpointgroup = networkendpointgroup ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { ++ c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEdgeSecurityServices.get" call. ++// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEdgeSecurityService{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets a specified NetworkEdgeSecurityService.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEdgeSecurityServices.get", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "networkEdgeSecurityService" ++ // ], ++ // "parameters": { ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to get.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "response": { ++ // "$ref": "NetworkEdgeSecurityService" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEdgeSecurityServices.insert": ++ ++type NetworkEdgeSecurityServicesInsertCall struct { ++ s *Service ++ project string ++ region string ++ networkedgesecurityservice *NetworkEdgeSecurityService ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a new service in the specified project using the data ++// included in the request. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { ++ c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.networkedgesecurityservice = networkedgesecurityservice + return c + } + +@@ -123124,15 +127854,22 @@ func (r *NetworkEndpointGroupsService) Insert(project string, zone string, netwo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123140,21 +127877,21 @@ func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkE + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123162,14 +127899,14 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123178,19 +127915,19 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "zone": c.zone, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.insert" call. 
++// Do executes the "compute.networkEdgeSecurityServices.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123221,13 +127958,13 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + } + return ret, nil + // { +- // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "description": "Creates a new service in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.insert", ++ // "id": "compute.networkEdgeSecurityServices.insert", + // "parameterOrder": [ + // "project", +- // "zone" ++ // "region" + // ], + // "parameters": { + // "project": { +@@ -123237,21 +127974,27 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "request": { +- // "$ref": "NetworkEndpointGroup" ++ // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" +@@ -123264,183 +128007,127 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + + } + +-// method id "compute.networkEndpointGroups.list": ++// method id "compute.networkEdgeSecurityServices.patch": + +-type NetworkEndpointGroupsListCall struct { +- s *Service +- project string +- zone string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesPatchCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ networkedgesecurityservice *NetworkEdgeSecurityService ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// List: Retrieves the list of network endpoint groups that are located +-// in the specified project and zone. ++// Patch: Patches the specified policy with the data included in the ++// request. + // ++// - networkEdgeSecurityService: Name of the network edge security ++// service to update. + // - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { +- c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { ++ c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- return c +-} +- +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkedgesecurityservice = networkedgesecurityservice + return c + } + +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("orderBy", orderBy) ++// Paths sets the optional parameter "paths": ++func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. 
+-func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("pageToken", pageToken) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// UpdateMask sets the optional parameter "updateMask": Indicates fields ++// to be updated as part of this request. ++func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.Set("updateMask", updateMask) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { ++func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { ++func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsListCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.list" call. +-// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { ++// Do executes the "compute.networkEdgeSecurityServices.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123459,7 +128146,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123471,36 +128158,26 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + } + return ret, nil + // { +- // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", +- // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.list", ++ // "description": "Patches the specified policy with the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.networkEdgeSecurityServices.patch", + // "parameterOrder": [ + // "project", +- // "zone" ++ // "region", ++ // "networkEdgeSecurityService" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" + // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "paths": { + // "location": "query", ++ // "repeated": true, + // "type": "string" + // }, + // "project": { +@@ -123510,80 +128187,58 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", +- // "location": "query", +- // "type": "boolean" +- // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "updateMask": { ++ // "description": "Indicates fields to be updated as part of this request.", ++ // "format": "google-fieldmask", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "request": { ++ // "$ref": "NetworkEdgeSecurityService" ++ // }, + // "response": { +- // "$ref": "NetworkEndpointGroupList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.networkEndpointGroups.listNetworkEndpoints": ++// method id "compute.networkEndpointGroups.aggregatedList": + +-type NetworkEndpointGroupsListNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// ListNetworkEndpoints: Lists the network endpoints in the specified +-// network endpoint group. ++// AggregatedList: Retrieves the list of network endpoint groups and ++// sorts them by zone. + // +-// - networkEndpointGroup: The name of the network endpoint group from +-// which you want to generate a list of included network endpoints. It +-// should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { +- c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. 
++func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { ++ c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest + return c + } + +@@ -123622,18 +128277,31 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } + ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -123647,7 +128315,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults in + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -123655,7 +128323,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. 
+-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -123664,7 +128332,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -123672,68 +128340,73 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(ret + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +-// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +-// will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or ++// Do executes the "compute.networkEndpointGroups.aggregatedList" call. ++// Exactly one of *NetworkEndpointGroupAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or + // (if a response was returned at all) in + // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check + // whether the returned error was because http.StatusNotModified was + // returned. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { ++func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123752,7 +128425,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupsListNetworkEndpoints{ ++ ret := &NetworkEndpointGroupAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123764,14 +128437,12 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Lists the network endpoints in the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", +- // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.listNetworkEndpoints", ++ // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", ++ // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "project" + // ], + // "parameters": { + // "filter": { +@@ -123779,6 +128450,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "location": "query", + // "type": "string" + // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -123787,12 +128463,6 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "minimum": "0", + // "type": "integer" + // }, +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", +@@ -123814,20 +128484,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" +- // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", +- // "request": { +- // "$ref": "NetworkEndpointGroupsListEndpointsRequest" +- // }, ++ // "path": "projects/{project}/aggregated/networkEndpointGroups", + // "response": { +- // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" ++ // "$ref": "NetworkEndpointGroupAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -123841,7 +128502,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { ++func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -123859,38 +128520,57 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Contex + } + } + +-// method id "compute.networkEndpointGroups.testIamPermissions": ++// method id "compute.networkEndpointGroups.attachNetworkEndpoints": + +-type NetworkEndpointGroupsTestIamPermissionsCall struct { +- s *Service +- project string +- zone string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// AttachNetworkEndpoints: Attach a list of network endpoints to the ++// specified network endpoint group. + // +-// - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-// - zone: The name of the zone for this request. +-func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { +- c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group where ++// you are attaching network endpoints to. It should comply with ++// RFC1035. ++// - project: Project ID for this request. 
++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123898,21 +128578,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Fiel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123920,14 +128600,14 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123935,21 +128615,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "resource": c.resource, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123968,7 +128648,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123980,16 +128660,22 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "description": "Attach a list of network endpoints to the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.testIamPermissions", ++ // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "zone", +- // "resource" ++ // "networkEndpointGroup" + // ], + // "parameters": { ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -123997,69 +128683,60 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { +- // "$ref": "TestPermissionsRequest" ++ // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.networkFirewallPolicies.addAssociation": ++// method id "compute.networkEndpointGroups.delete": + +-type NetworkFirewallPoliciesAddAssociationCall struct { +- s *Service +- project string +- firewallPolicy string +- firewallpolicyassociation *FirewallPolicyAssociation +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AddAssociation: Inserts an association for the specified firewall +-// policy. ++// Delete: Deletes the specified network endpoint group. The network ++// endpoints in the NEG and the VM instances they belong to are not ++// terminated when the NEG is deleted. Note that the NEG cannot be ++// deleted if there are backend services referencing it. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) AddAssociation(project string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *NetworkFirewallPoliciesAddAssociationCall { +- c := &NetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group to ++// delete. It should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { ++ c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.firewallPolicy = firewallPolicy +- c.firewallpolicyassociation = firewallpolicyassociation +- return c +-} +- +-// ReplaceExistingAssociation sets the optional parameter +-// "replaceExistingAssociation": Indicates whether or not to replace it +-// if an association of the attachment already exists. This is false by +-// default, in which case an error will be returned if an association +-// already exists. 
+-func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *NetworkFirewallPoliciesAddAssociationCall { +- c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup + return c + } + +@@ -124074,7 +128751,7 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(r + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124082,7 +128759,7 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124090,21 +128767,21 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Header() http.Header { ++func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124112,35 +128789,31 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "firewallPolicy": c.firewallPolicy, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkFirewallPolicies.addAssociation" call. ++// Do executes the "compute.networkEndpointGroups.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124171,19 +128844,19 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Inserts an association for the specified firewall policy.", +- // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", +- // "httpMethod": "POST", +- // "id": "compute.networkFirewallPolicies.addAssociation", ++ // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.networkEndpointGroups.delete", + // "parameterOrder": [ + // "project", +- // "firewallPolicy" ++ // "zone", ++ // "networkEndpointGroup" + // ], + // "parameters": { +- // "firewallPolicy": { +- // "description": "Name of the firewall policy to update.", ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -124194,21 +128867,19 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + // "required": true, + // "type": "string" + // }, +- // "replaceExistingAssociation": { +- // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", +- // "request": { +- // "$ref": "FirewallPolicyAssociation" +- // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "response": { + // "$ref": "Operation" + // }, +@@ -124220,45 +128891,33 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + + } + +-// method id "compute.networkFirewallPolicies.addRule": ++// method id "compute.networkEndpointGroups.detachNetworkEndpoints": + +-type NetworkFirewallPoliciesAddRuleCall struct { +- s *Service +- project string +- firewallPolicy string +- firewallpolicyrule *FirewallPolicyRule +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AddRule: Inserts a rule into a firewall policy. ++// DetachNetworkEndpoints: Detach a list of network endpoints from the ++// specified network endpoint group. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) AddRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesAddRuleCall { +- c := &NetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group where ++// you are removing network endpoints. It should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.firewallPolicy = firewallPolicy +- c.firewallpolicyrule = firewallpolicyrule +- return c +-} +- +-// MaxPriority sets the optional parameter "maxPriority": When +-// rule.priority is not specified, auto choose a unused priority between +-// minPriority and maxPriority>. This field is exclusive with +-// rule.priority. 
+-func (c *NetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *NetworkFirewallPoliciesAddRuleCall { +- c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) +- return c +-} +- +-// MinPriority sets the optional parameter "minPriority": When +-// rule.priority is not specified, auto choose a unused priority between +-// minPriority and maxPriority>. This field is exclusive with +-// rule.priority. +-func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *NetworkFirewallPoliciesAddRuleCall { +- c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c + } + +@@ -124273,7 +128932,7 @@ func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *Net + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124281,7 +128940,7 @@ func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *Networ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124289,21 +128948,21 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *Netwo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkFirewallPoliciesAddRuleCall) Header() http.Header { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124311,14 +128970,14 @@ func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Respon + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124326,20 +128985,21 @@ func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "firewallPolicy": c.firewallPolicy, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkFirewallPolicies.addRule" call. ++// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124370,34 +129030,22 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Inserts a rule into a firewall policy.", +- // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "description": "Detach a list of network endpoints from the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "httpMethod": "POST", +- // "id": "compute.networkFirewallPolicies.addRule", ++ // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "parameterOrder": [ + // "project", +- // "firewallPolicy" ++ // "zone", ++ // "networkEndpointGroup" + // ], + // "parameters": { +- // "firewallPolicy": { +- // "description": "Name of the firewall policy to update.", ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "maxPriority": { +- // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, +- // "minPriority": { +- // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -124409,11 +129057,17 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { +- // "$ref": "FirewallPolicyRule" ++ // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -124426,25 +129080,1536 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkFirewallPolicies.cloneRules": ++// method id "compute.networkEndpointGroups.get": + +-type NetworkFirewallPoliciesCloneRulesCall struct { +- s *Service +- project string +- firewallPolicy string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsGetCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// CloneRules: Copies rules to the specified firewall policy. ++// Get: Returns the specified network endpoint group. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) CloneRules(project string, firewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { +- c := &NetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.firewallPolicy = firewallPolicy ++// - networkEndpointGroup: The name of the network endpoint group. It ++// should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { ++ c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkEndpointGroupsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.get" call. ++// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkEndpointGroup.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroup{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.get", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "networkEndpointGroup" ++ // ], ++ // "parameters": { ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "response": { ++ // "$ref": "NetworkEndpointGroup" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEndpointGroups.insert": ++ ++type NetworkEndpointGroupsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ networkendpointgroup *NetworkEndpointGroup ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a network endpoint group in the specified project ++// using the parameters that are included in the request. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone where you want to create the network ++// endpoint group. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { ++ c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkendpointgroup = networkendpointgroup ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.insert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "request": { ++ // "$ref": "NetworkEndpointGroup" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEndpointGroups.list": ++ ++type NetworkEndpointGroupsListCall struct { ++ s *Service ++ project string ++ zone string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of network endpoint groups that are located ++// in the specified project and zone. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { ++ c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.list" call. ++// Exactly one of *NetworkEndpointGroupList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroupList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.list", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "response": { ++ // "$ref": "NetworkEndpointGroupList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.networkEndpointGroups.listNetworkEndpoints": ++ ++type NetworkEndpointGroupsListNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// ListNetworkEndpoints: Lists the network endpoints in the specified ++// network endpoint group. ++// ++// - networkEndpointGroup: The name of the network endpoint group from ++// which you want to generate a list of included network endpoints. It ++// should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. ++// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error ++// will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroupsListNetworkEndpoints{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Lists the network endpoints in the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.listNetworkEndpoints", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "networkEndpointGroup" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", ++ // "request": { ++ // "$ref": "NetworkEndpointGroupsListEndpointsRequest" ++ // }, ++ // "response": { ++ // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.networkEndpointGroups.testIamPermissions": ++ ++type NetworkEndpointGroupsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.addAssociation": ++ ++type NetworkFirewallPoliciesAddAssociationCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ firewallpolicyassociation *FirewallPolicyAssociation ++ 
urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddAssociation: Inserts an association for the specified firewall ++// policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++func (r *NetworkFirewallPoliciesService) AddAssociation(project string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *NetworkFirewallPoliciesAddAssociationCall { ++ c := &NetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyassociation = firewallpolicyassociation ++ return c ++} ++ ++// ReplaceExistingAssociation sets the optional parameter ++// "replaceExistingAssociation": Indicates whether or not to replace it ++// if an association of the attachment already exists. This is false by ++// default, in which case an error will be returned if an association ++// already exists. ++func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddAssociationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkFirewallPoliciesAddAssociationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkFirewallPolicies.addAssociation" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts an association for the specified firewall policy.", ++ // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", ++ // "httpMethod": "POST", ++ // "id": "compute.networkFirewallPolicies.addAssociation", ++ // "parameterOrder": [ ++ // "project", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "replaceExistingAssociation": { ++ // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", ++ // "request": { ++ // "$ref": "FirewallPolicyAssociation" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.addRule": ++ ++type NetworkFirewallPoliciesAddRuleCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ firewallpolicyrule *FirewallPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddRule: Inserts a rule into a firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. 
++func (r *NetworkFirewallPoliciesService) AddRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesAddRuleCall { ++ c := &NetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyrule = firewallpolicyrule ++ return c ++} ++ ++// MaxPriority sets the optional parameter "maxPriority": When ++// rule.priority is not specified, auto choose a unused priority between ++// minPriority and maxPriority>. This field is exclusive with ++// rule.priority. ++func (c *NetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) ++ return c ++} ++ ++// MinPriority sets the optional parameter "minPriority": When ++// rule.priority is not specified, auto choose a unused priority between ++// minPriority and maxPriority>. This field is exclusive with ++// rule.priority. ++func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkFirewallPoliciesAddRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkFirewallPolicies.addRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts a rule into a firewall policy.", ++ // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "httpMethod": "POST", ++ // "id": "compute.networkFirewallPolicies.addRule", ++ // "parameterOrder": [ ++ // "project", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "maxPriority": { ++ // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "minPriority": { ++ // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. 
This field is exclusive with rule.priority.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "request": { ++ // "$ref": "FirewallPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.cloneRules": ++ ++type NetworkFirewallPoliciesCloneRulesCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// CloneRules: Copies rules to the specified firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++func (r *NetworkFirewallPoliciesService) CloneRules(project string, firewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { ++ c := &NetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy + return c + } + +@@ -145881,9 +152046,367 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", ++ // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", ++ // "request": { ++ // "$ref": "BackendService" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionBackendServices.setIamPolicy": ++ ++type RegionBackendServicesSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. 
++func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { ++ c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionBackendServices.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionBackendServices.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionBackendServices.setSecurityPolicy": ++ ++type RegionBackendServicesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ backendService string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified backend service. For more information, see Google Cloud ++// Armor Overview ++// ++// - backendService: Name of the BackendService resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. 
++func (r *RegionBackendServicesService) SetSecurityPolicy(project string, region string, backendService string, securitypolicyreference *SecurityPolicyReference) *RegionBackendServicesSetSecurityPolicyCall { ++ c := &RegionBackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.backendService = backendService ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionBackendServicesSetSecurityPolicyCall) RequestId(requestId string) *RegionBackendServicesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionBackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "backendService": c.backendService, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionBackendServices.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionBackendServices.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "backendService" ++ // ], ++ // "parameters": { ++ // "backendService": { ++ // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + // "request": { +- // "$ref": "BackendService" ++ // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" +@@ -145896,174 +152419,6 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.regionBackendServices.setIamPolicy": +- +-type RegionBackendServicesSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. +-// +-// - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { +- c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") +- urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.regionBackendServices.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Policy{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.regionBackendServices.setIamPolicy", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", +- // "request": { +- // "$ref": "RegionSetPolicyRequest" +- // }, +- // "response": { +- // "$ref": "Policy" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- + // method id "compute.regionBackendServices.testIamPermissions": + + type RegionBackendServicesTestIamPermissionsCall struct { +@@ -148591,6 +154946,182 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* + + } + ++// method id "compute.regionDisks.bulkInsert": ++ ++type RegionDisksBulkInsertCall struct { ++ s *Service ++ project string ++ region string ++ 
bulkinsertdiskresource *BulkInsertDiskResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// BulkInsert: Bulk create a set of disks. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) BulkInsert(project string, region string, bulkinsertdiskresource *BulkInsertDiskResource) *RegionDisksBulkInsertCall { ++ c := &RegionDisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.bulkinsertdiskresource = bulkinsertdiskresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksBulkInsertCall) RequestId(requestId string) *RegionDisksBulkInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksBulkInsertCall) Fields(s ...googleapi.Field) *RegionDisksBulkInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksBulkInsertCall) Context(ctx context.Context) *RegionDisksBulkInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksBulkInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/bulkInsert") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.bulkInsert" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Bulk create a set of disks.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.bulkInsert", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/bulkInsert", ++ // "request": { ++ // "$ref": "BulkInsertDiskResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionDisks.createSnapshot": + + type RegionDisksCreateSnapshotCall struct { +@@ -150534,6 +157065,553 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.regionDisks.startAsyncReplication": ++ ++type RegionDisksStartAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disk string ++ regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StartAsyncReplication: Starts asynchronous replication. Must be ++// invoked on the primary disk. ++// ++// - disk: The name of the persistent disk. ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) StartAsyncReplication(project string, region string, disk string, regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest) *RegionDisksStartAsyncReplicationCall { ++ c := &RegionDisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disk = disk ++ c.regiondisksstartasyncreplicationrequest = regiondisksstartasyncreplicationrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStartAsyncReplicationCall) RequestId(requestId string) *RegionDisksStartAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStartAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *RegionDisksStartAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStartAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStartAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksstartasyncreplicationrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.startAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.startAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", ++ // "request": { ++ // "$ref": "RegionDisksStartAsyncReplicationRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionDisks.stopAsyncReplication": ++ ++type RegionDisksStopAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disk string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopAsyncReplication: Stops asynchronous replication. Can be invoked ++// either on the primary or on the secondary disk. ++// ++// - disk: The name of the persistent disk. ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) StopAsyncReplication(project string, region string, disk string) *RegionDisksStopAsyncReplicationCall { ++ c := &RegionDisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disk = disk ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. 
If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStopAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksStopAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStopAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.stopAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.stopAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionDisks.stopGroupAsyncReplication": ++ ++type RegionDisksStopGroupAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopGroupAsyncReplication: Stops asynchronous replication for a ++// consistency group of disks. Can be invoked either in the primary or ++// secondary scope. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. 
This must be the ++// region of the primary or secondary disks in the consistency group. ++func (r *RegionDisksService) StopGroupAsyncReplication(project string, region string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *RegionDisksStopGroupAsyncReplicationCall { ++ c := &RegionDisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStopGroupAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopGroupAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.stopGroupAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.stopGroupAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request. This must be the region of the primary or secondary disks in the consistency group.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ // "request": { ++ // "$ref": "DisksStopGroupAsyncReplicationResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionDisks.testIamPermissions": + + type RegionDisksTestIamPermissionsCall struct { +@@ -155928,7 +163006,1025 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. ++// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or ++// error will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea ++// der or (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. 
++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "instanceGroupManager": { ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "response": { ++ // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.regionInstanceGroupManagers.patch": ++ ++type RegionInstanceGroupManagersPatchCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ instancegroupmanager *InstanceGroupManager ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Patch: Updates a managed instance group using the information that ++// you specify in the request. This operation is marked as DONE when the ++// group is patched even if the instances in the group are still in the ++// process of being patched. You must separately verify the status of ++// the individual instances with the listmanagedinstances method. This ++// method supports PATCH semantics and uses the JSON merge patch format ++// and processing rules. If you update your group to specify a new ++// template or instance configuration, it's possible that your intended ++// specification for each VM in the group is different from the current ++// state of that VM. To learn how to apply an updated configuration to ++// the VMs in a MIG, see Updating instances in a MIG. ++// ++// - instanceGroupManager: The name of the instance group manager. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { ++ c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.instancegroupmanager = instancegroupmanager ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. 
You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.regionInstanceGroupManagers.patch", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "The name of the instance group manager.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "request": { ++ // "$ref": "InstanceGroupManager" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": ++ ++type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchPerInstanceConfigs: Inserts or patches per-instance ++// configurations for the managed instance group. perInstanceConfig.name ++// serves as a key used to distinguish whether to perform insert or ++// patch. ++// ++// - instanceGroupManager: The name of the managed instance group. It ++// should conform to RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request, should conform to ++// RFC1035. 
++func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.recreateInstances": ++ ++type RegionInstanceGroupManagersRecreateInstancesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// RecreateInstances: Flags the specified VM instances in the managed ++// instance group to be immediately recreated. Each instance is ++// recreated using the group's current configuration. This operation is ++// marked as DONE when the flag is set even if the instances have not ++// yet been recreated. You must separately verify the status of each ++// instance by checking its currentAction field; for more information, ++// see Checking the status of managed instances. If the group is part of ++// a backend service that has enabled connection draining, it can take ++// up to 60 seconds after the connection draining duration has elapsed ++// before the VM instance is removed or deleted. You can specify a ++// maximum of 1000 instances with this method per request. ++// ++// - instanceGroupManager: Name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.recreateInstances", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "Name of the managed instance group.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagersRecreateRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.resize": ++ ++type RegionInstanceGroupManagersResizeCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Resize: Changes the intended size of the managed instance group. If ++// you increase the size, the group creates new instances using the ++// current instance template. If you decrease the size, the group ++// deletes one or more instances. The resize operation is marked DONE if ++// the resize request is successful. The underlying actions take ++// additional time. You must separately verify the status of the ++// creating or deleting actions with the listmanagedinstances method. If ++// the group is part of a backend service that has enabled connection ++// draining, it can take up to 60 seconds after the connection draining ++// duration has elapsed before the VM instance is removed or deleted. ++// ++// - instanceGroupManager: Name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - size: Number of instances that should exist in this instance group ++// manager. ++func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { ++ c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.urlParams_.Set("size", fmt.Sprint(size)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.resize" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.resize", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager", ++ // "size" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "Name of the managed instance group.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "size": { ++ // "description": "Number of instances that should exist in this instance group manager.", ++ // "format": "int32", ++ // "location": "query", ++ // "minimum": "0", ++ // "required": true, ++ // "type": "integer" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.resizeAdvanced": ++ ++type RegionInstanceGroupManagersResizeAdvancedCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// ResizeAdvanced: Resizes the regional managed instance group with ++// advanced configuration options like disabling creation retries. This ++// is an extended version of the resize method. If you increase the ++// size, the group creates new instances using the current instance ++// template. If you decrease the size, the group deletes one or more ++// instances. The resize operation is marked DONE if the resize request ++// is successful. The underlying actions take additional time. You must ++// separately verify the status of the creating or deleting actions with ++// the get or listmanagedinstances method. 
If the group is part of a ++// backend service that has enabled connection draining, it can take up ++// to 60 seconds after the connection draining duration has elapsed ++// before the VM instance is removed or deleted. ++// ++// - instanceGroupManager: The name of the managed instance group. It ++// must be a string that meets the requirements in RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. It must be a ++// string that meets the requirements in RFC1035. ++func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, region string, instanceGroupManager string, regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest) *RegionInstanceGroupManagersResizeAdvancedCall { ++ c := &RegionInstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagersresizeadvancedrequest = regioninstancegroupmanagersresizeadvancedrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeAdvancedCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -155936,21 +164032,21 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...goog + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeAdvancedCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -155958,9 +164054,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersresizeadvancedrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -155975,16 +164076,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +-// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +-// error will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +-// der or (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { ++// Do executes the "compute.regionInstanceGroupManagers.resizeAdvanced" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156003,7 +164102,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -156015,45 +164114,22 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + } + return ret, nil + // { +- // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "description": "Resizes the regional managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the get or listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", ++ // "id": "compute.regionInstanceGroupManagers.resizeAdvanced", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -156062,85 +164138,58 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagersResizeAdvancedRequest" ++ // }, + // "response": { +- // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.regionInstanceGroupManagers.patch": ++// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": + +-type RegionInstanceGroupManagersPatchCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- instancegroupmanager *InstanceGroupManager +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates a managed instance group using the information that +-// you specify in the request. This operation is marked as DONE when the +-// group is patched even if the instances in the group are still in the +-// process of being patched. You must separately verify the status of +-// the individual instances with the listmanagedinstances method. This +-// method supports PATCH semantics and uses the JSON merge patch format +-// and processing rules. If you update your group to specify a new +-// template or instance configuration, it's possible that your intended +-// specification for each VM in the group is different from the current +-// state of that VM. 
To learn how to apply an updated configuration to +-// the VMs in a MIG, see Updating instances in a MIG. ++// SetAutoHealingPolicies: Modifies the autohealing policy for the ++// instances in this managed instance group. [Deprecated] This method is ++// deprecated. Use regionInstanceGroupManagers.patch instead. + // +-// - instanceGroupManager: The name of the instance group manager. ++// - instanceGroupManager: Name of the managed instance group. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { +- c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++ c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.instancegroupmanager = instancegroupmanager ++ c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest + return c + } + +@@ -156155,7 +164204,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156163,7 +164212,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156171,21 +164220,21 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156193,16 +164242,16 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -156215,14 +164264,14 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.patch" call. ++// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156253,10 +164302,11 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. 
To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "httpMethod": "PATCH", +- // "id": "compute.regionInstanceGroupManagers.patch", ++ // "deprecated": true, ++ // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "parameterOrder": [ + // "project", + // "region", +@@ -156264,7 +164314,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the instance group manager.", ++ // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156288,9 +164338,9 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { +- // "$ref": "InstanceGroupManager" ++ // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156303,35 +164353,32 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": ++// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": + +-type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetInstanceTemplateCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// PatchPerInstanceConfigs: Inserts or patches per-instance +-// configurations for the managed instance group. perInstanceConfig.name +-// serves as a key used to distinguish whether to perform insert or +-// patch. ++// SetInstanceTemplate: Sets the instance template to use when creating ++// new instances or recreating instances in this group. Existing ++// instances are not affected. + // +-// - instanceGroupManager: The name of the managed instance group. It +-// should conform to RFC1035. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request, should conform to +-// RFC1035. 
+-func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { +- c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroupManager: The name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++ c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq ++ c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + return c + } + +@@ -156346,7 +164393,7 @@ func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156354,7 +164401,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(reque + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156362,21 +164409,21 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...goo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156384,14 +164431,14 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt s + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156406,14 +164453,14 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt s + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. ++// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156444,10 +164491,10 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + } + return ret, nil + // { +- // "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", ++ // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "region", +@@ -156455,7 +164502,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156468,7 +164515,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156479,9 +164526,9 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { +- // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" ++ // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156494,40 +164541,32 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + + } + +-// method id "compute.regionInstanceGroupManagers.recreateInstances": ++// method id "compute.regionInstanceGroupManagers.setTargetPools": + +-type RegionInstanceGroupManagersRecreateInstancesCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetTargetPoolsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// RecreateInstances: Flags the specified VM instances in the managed +-// instance group to be immediately recreated. Each instance is +-// recreated using the group's current configuration. This operation is +-// marked as DONE when the flag is set even if the instances have not +-// yet been recreated. You must separately verify the status of each +-// instance by checking its currentAction field; for more information, +-// see Checking the status of managed instances. If the group is part of +-// a backend service that has enabled connection draining, it can take +-// up to 60 seconds after the connection draining duration has elapsed +-// before the VM instance is removed or deleted. You can specify a +-// maximum of 1000 instances with this method per request. ++// SetTargetPools: Modifies the target pools to which all new instances ++// in this group are assigned. 
Existing instances in the group are not ++// affected. + // + // - instanceGroupManager: Name of the managed instance group. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { +- c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { ++ c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest ++ c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest + return c + } + +@@ -156542,7 +164581,7 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156550,7 +164589,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId s + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156558,21 +164597,21 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156580,14 +164619,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156602,14 +164641,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. ++// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156640,10 +164679,10 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + } + return ret, nil + // { +- // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
You can specify a maximum of 1000 instances with this method per request.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.recreateInstances", ++ // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "parameterOrder": [ + // "project", + // "region", +@@ -156675,9 +164714,9 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { +- // "$ref": "RegionInstanceGroupManagersRecreateRequest" ++ // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156690,40 +164729,208 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + + } + +-// method id "compute.regionInstanceGroupManagers.resize": ++// method id "compute.regionInstanceGroupManagers.testIamPermissions": + +-type RegionInstanceGroupManagersResizeCall struct { ++type RegionInstanceGroupManagersTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.update": ++ ++type RegionInstanceGroupManagersUpdateCall struct { + s *Service + project string + region string + instanceGroupManager string ++ instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Resize: Changes the intended size of the managed instance group. If +-// you increase the size, the group creates new instances using the +-// current instance template. If you decrease the size, the group +-// deletes one or more instances. The resize operation is marked DONE if +-// the resize request is successful. The underlying actions take +-// additional time. You must separately verify the status of the +-// creating or deleting actions with the listmanagedinstances method. If +-// the group is part of a backend service that has enabled connection +-// draining, it can take up to 60 seconds after the connection draining +-// duration has elapsed before the VM instance is removed or deleted. ++// Update: Updates a managed instance group using the information that ++// you specify in the request. 
This operation is marked as DONE when the ++// group is updated even if the instances in the group have not yet been ++// updated. You must separately verify the status of the individual ++// instances with the listmanagedinstances method. If you update your ++// group to specify a new template or instance configuration, it's ++// possible that your intended specification for each VM in the group is ++// different from the current state of that VM. To learn how to apply an ++// updated configuration to the VMs in a MIG, see Updating instances in ++// a MIG. + // +-// - instanceGroupManager: Name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-// - size: Number of instances that should exist in this instance group +-// manager. +-func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { +- c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroupManager: The name of the instance group manager. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { ++ c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.urlParams_.Set("size", fmt.Sprint(size)) ++ c.instancegroupmanager = instancegroupmanager + return c + } + +@@ -156738,7 +164945,7 @@ func (r *RegionInstanceGroupManagersService) Resize(project string, region strin + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156746,7 +164953,7 @@ func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *Reg + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156754,21 +164961,21 @@ func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Re + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156776,11 +164983,16 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } +@@ -156793,14 +165005,14 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.resize" call. ++// Do executes the "compute.regionInstanceGroupManagers.update" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156831,19 +165043,18 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.resize", ++ // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "httpMethod": "PUT", ++ // "id": "compute.regionInstanceGroupManagers.update", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager", +- // "size" ++ // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156865,17 +165076,12 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" +- // }, +- // "size": { +- // "description": "Number of instances that should exist in this instance group manager.", +- // "format": "int32", +- // "location": "query", +- // "minimum": "0", +- // "required": true, +- // "type": "integer" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "request": { ++ // "$ref": "InstanceGroupManager" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -156887,43 +165093,35 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroupManagers.resizeAdvanced": ++// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": + +-type RegionInstanceGroupManagersResizeAdvancedCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// ResizeAdvanced: Resizes the regional managed instance group with +-// advanced configuration options like disabling creation retries. This +-// is an extended version of the resize method. If you increase the +-// size, the group creates new instances using the current instance +-// template. If you decrease the size, the group deletes one or more +-// instances. The resize operation is marked DONE if the resize request +-// is successful. The underlying actions take additional time. You must +-// separately verify the status of the creating or deleting actions with +-// the get or listmanagedinstances method. If the group is part of a +-// backend service that has enabled connection draining, it can take up +-// to 60 seconds after the connection draining duration has elapsed +-// before the VM instance is removed or deleted. ++// UpdatePerInstanceConfigs: Inserts or updates per-instance ++// configurations for the managed instance group. perInstanceConfig.name ++// serves as a key used to distinguish whether to perform insert or ++// patch. + // + // - instanceGroupManager: The name of the managed instance group. It +-// must be a string that meets the requirements in RFC1035. ++// should conform to RFC1035. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. It must be a +-// string that meets the requirements in RFC1035. +-func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, region string, instanceGroupManager string, regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest) *RegionInstanceGroupManagersResizeAdvancedCall { +- c := &RegionInstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request, should conform to ++// RFC1035. 
++func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++ c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagersresizeadvancedrequest = regioninstancegroupmanagersresizeadvancedrequest ++ c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq + return c + } + +@@ -156938,7 +165136,7 @@ func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, regi + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156946,7 +165144,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId stri + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156954,21 +165152,21 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156976,14 +165174,14 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (* + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersresizeadvancedrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156998,14 +165196,14 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (* + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.resizeAdvanced" call. ++// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157036,10 +165234,10 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Resizes the regional managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the get or listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.resizeAdvanced", ++ // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "region", +@@ -157047,7 +165245,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035.", ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157060,7 +165258,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157071,9 +165269,9 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { +- // "$ref": "RegionInstanceGroupManagersResizeAdvancedRequest" ++ // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // }, + // "response": { + // "$ref": "Operation" +@@ -157086,114 +165284,103 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": ++// method id "compute.regionInstanceGroups.get": + +-type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupsGetCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetAutoHealingPolicies: Modifies the autohealing policy for the +-// instances in this managed instance group. [Deprecated] This method is +-// deprecated. Use regionInstanceGroupManagers.patch instead. ++// Get: Returns the specified instance group resource. + // +-// - instanceGroupManager: Name of the managed instance group. 
++// - instanceGroup: Name of the instance group resource to return. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +- c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { ++ c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +- c.urlParams_.Set("requestId", requestId) ++ c.instanceGroup = instanceGroup + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { ++func (c *RegionInstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// Do executes the "compute.regionInstanceGroups.get" call. ++// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// *InstanceGroup.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157212,7 +165399,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -157224,21 +165411,286 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + } + return ret, nil + // { +- // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", ++ // "description": "Returns the specified instance group resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceGroups.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "instanceGroup": { ++ // "description": "Name of the instance group resource to return.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "response": { ++ // "$ref": "InstanceGroup" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroups.list": ++ ++type RegionInstanceGroupsListCall struct { ++ s *Service ++ project string ++ region string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of instance group resources contained within ++// the specified region. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { ++ c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. 
If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. 
++func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroups.list" call. ++// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any ++// non-2xx status code is an error. 
Response headers are in either ++// *RegionInstanceGroupList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &RegionInstanceGroupList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of instance group resources contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceGroups.list", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", +@@ -157253,76 +165705,164 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", +- // "type": "string" ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", +- // "request": { +- // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroups", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "RegionInstanceGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type RegionInstanceGroupManagersSetInstanceTemplateCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.regionInstanceGroups.listInstances": ++ ++type RegionInstanceGroupsListInstancesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetInstanceTemplate: Sets the instance template to use when creating +-// new instances or recreating instances in this group. Existing +-// instances are not affected. ++// ListInstances: Lists the instances in the specified instance group ++// and displays information about the named ports. Depending on the ++// specified options, this method can list all instances or only the ++// instances that are running. The orderBy query parameter is not ++// supported. + // +-// - instanceGroupManager: The name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { +- c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroup: Name of the regional instance group for which we ++// want to list the instances. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. 
++func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { ++ c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest ++ c.instanceGroup = instanceGroup ++ c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest + return c + } + +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { +- c.urlParams_.Set("requestId", requestId) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157330,21 +165870,21 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googlea + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { ++func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157352,14 +165892,14 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157367,21 +165907,22 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. 
+-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.regionInstanceGroups.listInstances" call. ++// Exactly one of *RegionInstanceGroupsListInstances or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *RegionInstanceGroupsListInstances.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157400,7 +165941,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &RegionInstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -157412,22 +165953,45 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + } + return ret, nil + // { +- // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", ++ // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", ++ // "id": "compute.regionInstanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the managed instance group.", ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "instanceGroup": { ++ // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -157441,53 +166005,75 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", +- // "type": "string" ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { +- // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" ++ // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "RegionInstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.regionInstanceGroupManagers.setTargetPools": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type RegionInstanceGroupManagersSetTargetPoolsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.regionInstanceGroups.setNamedPorts": ++ ++type RegionInstanceGroupsSetNamedPortsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetTargetPools: Modifies the target pools to which all new instances +-// in this group are assigned. Existing instances in the group are not +-// affected. ++// SetNamedPorts: Sets the named ports for the specified regional ++// instance group. + // +-// - instanceGroupManager: Name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. 
+-func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { +- c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroup: The name of the regional instance group where the ++// named ports are updated. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { ++ c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest ++ c.instanceGroup = instanceGroup ++ c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + return c + } + +@@ -157502,7 +166088,7 @@ func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, regi + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -157510,7 +166096,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId stri + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157518,21 +166104,21 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157540,14 +166126,14 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157555,21 +166141,21 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. ++// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157600,18 +166186,18 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", ++ // "description": "Sets the named ports for the specified regional instance group.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setTargetPools", ++ // "id": "compute.regionInstanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "instanceGroup": { ++ // "description": "The name of the regional instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157635,9 +166221,9 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { +- // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" ++ // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -157650,9 +166236,9 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.regionInstanceGroupManagers.testIamPermissions": ++// method id "compute.regionInstanceGroups.testIamPermissions": + +-type RegionInstanceGroupManagersTestIamPermissionsCall struct { ++type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string +@@ -157669,8 +166255,8 @@ type RegionInstanceGroupManagersTestIamPermissionsCall struct { + // - project: Project ID for this request. + // - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { +- c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { ++ c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource +@@ -157681,7 +166267,7 @@ func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157689,21 +166275,21 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleap + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157718,7 +166304,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157733,14 +166319,14 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. ++// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157772,9 +166358,9 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.testIamPermissions", ++ // "id": "compute.regionInstanceGroups.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", +@@ -157803,7 +166389,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -157819,230 +166405,29 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + + } + +-// method id "compute.regionInstanceGroupManagers.update": ++// method id "compute.regionInstanceTemplates.delete": + +-type RegionInstanceGroupManagersUpdateCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- instancegroupmanager *InstanceGroupManager +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesDeleteCall struct { ++ s *Service ++ project string ++ region string ++ instanceTemplate string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Update: Updates a managed instance group using the information that +-// you specify in the request. This operation is marked as DONE when the +-// group is updated even if the instances in the group have not yet been +-// updated. You must separately verify the status of the individual +-// instances with the listmanagedinstances method. If you update your +-// group to specify a new template or instance configuration, it's +-// possible that your intended specification for each VM in the group is +-// different from the current state of that VM. To learn how to apply an +-// updated configuration to the VMs in a MIG, see Updating instances in +-// a MIG. ++// Delete: Deletes the specified instance template. Deleting an instance ++// template is permanent and cannot be undone. + // +-// - instanceGroupManager: The name of the instance group manager. ++// - instanceTemplate: The name of the instance template to delete. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { +- c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.instancegroupmanager = instancegroupmanager +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { +- c.urlParams_.Set("requestId", requestId) +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PUT", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.regionInstanceGroupManagers.update" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Operation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "httpMethod": "PUT", +- // "id": "compute.regionInstanceGroupManagers.update", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "instanceGroupManager" +- // ], +- // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the instance group manager.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "Name of the region scoping this request.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "request": { +- // "$ref": "InstanceGroupManager" +- // }, +- // "response": { +- // "$ref": "Operation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": +- +-type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// UpdatePerInstanceConfigs: Inserts or updates per-instance +-// configurations for the managed instance group. perInstanceConfig.name +-// serves as a key used to distinguish whether to perform insert or +-// patch. +-// +-// - instanceGroupManager: The name of the managed instance group. It +-// should conform to RFC1035. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request, should conform to +-// RFC1035. +-func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +- c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { ++ c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq ++ c.instanceTemplate = instanceTemplate + return c + } + +@@ -158057,7 +166442,7 @@ func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project st + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -158065,7 +166450,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -158073,21 +166458,21 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...go + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -158095,36 +166480,31 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. ++// Do executes the "compute.regionInstanceTemplates.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158155,19 +166535,20 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + } + return ret, nil + // { +- // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", ++ // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.regionInstanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceTemplate" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "instanceTemplate": { ++ // "description": "The name of the instance template to delete.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -158179,8 +166560,9 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -158190,10 +166572,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", +- // "request": { +- // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, +@@ -158205,36 +166584,36 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + + } + +-// method id "compute.regionInstanceGroups.get": ++// method id "compute.regionInstanceTemplates.get": + +-type RegionInstanceGroupsGetCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesGetCall struct { ++ s *Service ++ project string ++ region string ++ instanceTemplate string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified instance group resource. ++// Get: Returns the specified instance template. 
+ // +-// - instanceGroup: Name of the instance group resource to return. ++// - instanceTemplate: The name of the instance template. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { +- c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { ++ c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup ++ c.instanceTemplate = instanceTemplate + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -158244,7 +166623,7 @@ func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstan + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -158252,21 +166631,21 @@ func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInsta + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsGetCall) Header() http.Header { ++func (c *RegionInstanceTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -158279,7 +166658,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -158287,21 +166666,21 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, ++ "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.get" call. +-// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *InstanceGroup.ServerResponse.Header or (if a response was returned +-// at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.regionInstanceTemplates.get" call. ++// Exactly one of *InstanceTemplate or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstanceTemplate.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { ++func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158320,7 +166699,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceGroup{ ++ ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158332,19 +166711,20 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + } + return ret, nil + // { +- // "description": "Returns the specified instance group resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "description": "Returns the specified instance template.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceGroups.get", ++ // "id": "compute.regionInstanceTemplates.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroup" ++ // "instanceTemplate" + // ], + // "parameters": { +- // "instanceGroup": { +- // "description": "Name of the instance group resource to return.", ++ // "instanceTemplate": { ++ // "description": "The name of the instance template.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -158356,15 +166736,16 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": 
"projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { +- // "$ref": "InstanceGroup" ++ // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -158375,163 +166756,91 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + + } + +-// method id "compute.regionInstanceGroups.list": ++// method id "compute.regionInstanceTemplates.insert": + +-type RegionInstanceGroupsListCall struct { +- s *Service +- project string +- region string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesInsertCall struct { ++ s *Service ++ project string ++ region string ++ instancetemplate *InstanceTemplate ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// List: Retrieves the list of instance group resources contained within +-// the specified region. ++// Insert: Creates an instance template in the specified project and ++// region using the global instance template whose URL is included in ++// the request. + // + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { +- c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { ++ c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region ++ c.instancetemplate = instancetemplate + return c + } + +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. 
However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +- return c +-} +- +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("orderBy", orderBy) +- return c +-} +- +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("pageToken", pageToken) +- return c +-} +- +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. 
If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { ++func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { ++func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsListCall) Header() http.Header { ++func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -158543,14 +166852,14 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.list" call. +-// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { ++// Do executes the "compute.regionInstanceTemplates.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158569,7 +166878,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158581,38 +166890,15 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + } + return ret, nil + // { +- // "description": "Retrieves the list of instance group resources contained within the specified region.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups", +- // "httpMethod": "GET", +- // "id": "compute.regionInstanceGroups.list", ++ // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceTemplates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -158621,80 +166907,54 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups", ++ // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "request": { ++ // "$ref": "InstanceTemplate" ++ // }, + // "response": { +- // "$ref": "RegionInstanceGroupList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] +- // } +- +-} +- +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. 
+-func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } ++ // } ++ + } + +-// method id "compute.regionInstanceGroups.listInstances": ++// method id "compute.regionInstanceTemplates.list": + +-type RegionInstanceGroupsListInstancesCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesListCall struct { ++ s *Service ++ project string ++ region string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// ListInstances: Lists the instances in the specified instance group +-// and displays information about the named ports. Depending on the +-// specified options, this method can list all instances or only the +-// instances that are running. The orderBy query parameter is not +-// supported. ++// List: Retrieves a list of instance templates that are contained ++// within the specified project and region. + // +-// - instanceGroup: Name of the regional instance group for which we +-// want to list the instances. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { +- c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the regions for this request. ++func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { ++ c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup +- c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest + return c + } + +@@ -158733,7 +166993,7 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -158744,7 +167004,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. 
(Default: `500`) +-func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -158758,7 +167018,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -158766,7 +167026,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -158775,7 +167035,7 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -158783,67 +167043,73 @@ func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnParti + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { ++func (c *RegionInstanceTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.listInstances" call. +-// Exactly one of *RegionInstanceGroupsListInstances or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { ++// Do executes the "compute.regionInstanceTemplates.list" call. ++// Exactly one of *InstanceTemplateList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstanceTemplateList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158862,7 +167128,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupsListInstances{ ++ ret := &InstanceTemplateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158874,14 +167140,13 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.listInstances", ++ // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceTemplates.list", + // "parameterOrder": [ + // "project", +- // "region", +- // "instanceGroup" ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -158889,12 +167154,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "instanceGroup": { +- // "description": "Name of the regional instance group for which we want to list the instances.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -158921,8 +167180,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the regions for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -158932,12 +167192,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", +- // "request": { +- // "$ref": "RegionInstanceGroupsListInstancesRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceTemplates", + // "response": { +- // "$ref": "RegionInstanceGroupsListInstances" ++ // "$ref": "InstanceTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -158951,7 +167208,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. 
+ // The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { ++func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -158969,32 +167226,28 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun + } + } + +-// method id "compute.regionInstanceGroups.setNamedPorts": ++// method id "compute.regionInstances.bulkInsert": + +-type RegionInstanceGroupsSetNamedPortsCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstancesBulkInsertCall struct { ++ s *Service ++ project string ++ region string ++ bulkinsertinstanceresource *BulkInsertInstanceResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetNamedPorts: Sets the named ports for the specified regional +-// instance group. ++// BulkInsert: Creates multiple instances in a given region. Count ++// specifies the number of instances to create. + // +-// - instanceGroup: The name of the regional instance group where the +-// named ports are updated. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { +- c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { ++ c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup +- c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest ++ c.bulkinsertinstanceresource = bulkinsertinstanceresource + return c + } + +@@ -159009,7 +167262,7 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159017,7 +167270,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *Reg + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159025,21 +167278,21 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Re + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { ++func (c *RegionInstancesBulkInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159047,14 +167300,14 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -159062,21 +167315,20 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. ++// Do executes the "compute.regionInstances.bulkInsert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159107,22 +167359,15 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Sets the named ports for the specified regional instance group.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", ++ // "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", ++ // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.setNamedPorts", ++ // "id": "compute.regionInstances.bulkInsert", + // "parameterOrder": [ + // "project", +- // "region", +- // "instanceGroup" ++ // "region" + // ], + // "parameters": { +- // "instanceGroup": { +- // "description": "The name of the regional instance group where the named ports are updated.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -159131,8 +167376,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -159142,9 +167388,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", ++ // "path": "projects/{project}/regions/{region}/instances/bulkInsert", + // "request": { +- // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" ++ // "$ref": "BulkInsertInstanceResource" + // }, + // "response": { + // "$ref": "Operation" +@@ -159157,38 +167403,56 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroups.testIamPermissions": ++// method id "compute.regionInstantSnapshots.delete": + +-type RegionInstanceGroupsTestIamPermissionsCall struct { +- s *Service +- project string +- region string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// Delete: Deletes the specified InstantSnapshot resource. Keep in mind ++// that deleting a single instantSnapshot might not necessarily delete ++// all the data on that instantSnapshot. If any data on the ++// instantSnapshot that is marked for deletion is needed for subsequent ++// instantSnapshots, the data will be moved to the next corresponding ++// instantSnapshot. For more information, see Deleting instantSnapshots. + // ++// - instantSnapshot: Name of the InstantSnapshot resource to delete. + // - project: Project ID for this request. + // - region: The name of the region for this request. 
+-// - resource: Name or id of the resource for this request. +-func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { +- c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstantSnapshotsService) Delete(project string, region string, instantSnapshot string) *RegionInstantSnapshotsDeleteCall { ++ c := &RegionInstantSnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.instantSnapshot = instantSnapshot ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstantSnapshotsDeleteCall) RequestId(requestId string) *RegionInstantSnapshotsDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { ++func (c *RegionInstantSnapshotsDeleteCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159196,21 +167460,21 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { ++func (c *RegionInstantSnapshotsDeleteCall) Context(ctx context.Context) *RegionInstantSnapshotsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { ++func (c *RegionInstantSnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159218,36 +167482,31 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*htt + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.regionInstantSnapshots.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstantSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159266,7 +167525,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -159278,16 +167537,23 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.testIamPermissions", ++ // "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.regionInstantSnapshots.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "resource" ++ // "instantSnapshot" + // ], + // "parameters": { ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -159302,53 +167568,49 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.regionInstanceTemplates.delete": ++// method id "compute.regionInstantSnapshots.export": + +-type RegionInstanceTemplatesDeleteCall struct { +- s *Service +- project string +- region string +- instanceTemplate string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsExportCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ regioninstantsnapshotsexportrequest *RegionInstantSnapshotsExportRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified instance template. Deleting an instance +-// template is permanent and cannot be undone. ++// Export: Export the changed blocks between two instant snapshots to a ++// customer's bucket in the user specified format. + // +-// - instanceTemplate: The name of the instance template to delete. ++// - instantSnapshot: Name of the instant snapshot to export. + // - project: Project ID for this request. +-// - region: The name of the region for this request. +-func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { +- c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the zone for this request. ++func (r *RegionInstantSnapshotsService) Export(project string, region string, instantSnapshot string, regioninstantsnapshotsexportrequest *RegionInstantSnapshotsExportRequest) *RegionInstantSnapshotsExportCall { ++ c := &RegionInstantSnapshotsExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceTemplate = instanceTemplate ++ c.instantSnapshot = instantSnapshot ++ c.regioninstantsnapshotsexportrequest = regioninstantsnapshotsexportrequest + return c + } + +@@ -159363,7 +167625,7 @@ func (r *RegionInstanceTemplatesService) Delete(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) RequestId(requestId string) *RegionInstantSnapshotsExportCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159371,7 +167633,7 @@ func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionI + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159379,21 +167641,21 @@ func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *Region + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) Context(ctx context.Context) *RegionInstantSnapshotsExportCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { ++func (c *RegionInstantSnapshotsExportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsExportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159401,31 +167663,36 @@ func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstantsnapshotsexportrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceTemplate": c.instanceTemplate, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.delete" call. ++// Do executes the "compute.regionInstantSnapshots.export" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsExportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159456,18 +167723,18 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", +- // "httpMethod": "DELETE", +- // "id": "compute.regionInstanceTemplates.delete", ++ // "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.export", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceTemplate" ++ // "instantSnapshot" + // ], + // "parameters": { +- // "instanceTemplate": { +- // "description": "The name of the instance template to delete.", ++ // "instantSnapshot": { ++ // "description": "Name of the instant snapshot to export.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -159481,7 +167748,7 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -159493,7 +167760,10 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ // "request": { ++ // "$ref": "RegionInstantSnapshotsExportRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -159505,36 +167775,37 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.regionInstanceTemplates.get": ++// method id "compute.regionInstantSnapshots.get": + +-type RegionInstanceTemplatesGetCall struct { +- s *Service +- project string +- region string +- instanceTemplate string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsGetCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified instance template. ++// Get: Returns the specified InstantSnapshot resource in the specified ++// region. + // +-// - instanceTemplate: The name of the instance template. ++// - instantSnapshot: Name of the InstantSnapshot resource to return. + // - project: Project ID for this request. + // - region: The name of the region for this request. 
+-func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { +- c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstantSnapshotsService) Get(project string, region string, instantSnapshot string) *RegionInstantSnapshotsGetCall { ++ c := &RegionInstantSnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceTemplate = instanceTemplate ++ c.instantSnapshot = instantSnapshot + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159544,7 +167815,7 @@ func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionIns + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -159552,21 +167823,21 @@ func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionIn + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) Context(ctx context.Context) *RegionInstantSnapshotsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesGetCall) Header() http.Header { ++func (c *RegionInstantSnapshotsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159579,7 +167850,7 @@ func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -159587,21 +167858,21 @@ func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceTemplate": c.instanceTemplate, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.get" call. +-// Exactly one of *InstanceTemplate or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InstanceTemplate.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.regionInstantSnapshots.get" call. ++// Exactly one of *InstantSnapshot or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *InstantSnapshot.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { ++func (c *RegionInstantSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*InstantSnapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159620,7 +167891,7 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceTemplate{ ++ ret := &InstantSnapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -159632,18 +167903,18 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + } + return ret, nil + // { +- // "description": "Returns the specified instance template.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "description": "Returns the specified InstantSnapshot resource in the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceTemplates.get", ++ // "id": "compute.regionInstantSnapshots.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceTemplate" ++ // "instantSnapshot" + // ], + // "parameters": { +- // "instanceTemplate": { +- // "description": "The name of the instance template.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -159664,9 +167935,9 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "InstanceTemplate" ++ // "$ref": "InstantSnapshot" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", +@@ -159677,29 +167948,213 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + + } + +-// method id "compute.regionInstanceTemplates.insert": ++// method id "compute.regionInstantSnapshots.getIamPolicy": + +-type RegionInstanceTemplatesInsertCall struct { +- s *Service +- project string +- region string +- instancetemplate *InstanceTemplate +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsGetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an instance template in the specified project and +-// region using the global instance template whose URL is included in +-// the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. +-func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { +- c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) GetIamPolicy(project string, region string, resource string) *RegionInstantSnapshotsGetIamPolicyCall { ++ c := &RegionInstantSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instancetemplate = instancetemplate ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Context(ctx context.Context) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstantSnapshotsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstantSnapshots.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstantSnapshots.insert": ++ ++type RegionInstantSnapshotsInsertCall struct { ++ s *Service ++ project string ++ region string ++ instantsnapshot *InstantSnapshot ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates an instant snapshot in the specified region. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *RegionInstantSnapshotsService) Insert(project string, region string, instantsnapshot *InstantSnapshot) *RegionInstantSnapshotsInsertCall { ++ c := &RegionInstantSnapshotsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instantsnapshot = instantsnapshot + return c + } + +@@ -159714,7 +168169,7 @@ func (r *RegionInstanceTemplatesService) Insert(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) RequestId(requestId string) *RegionInstantSnapshotsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159722,7 +168177,7 @@ func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionI + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159730,21 +168185,21 @@ func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *Region + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) Context(ctx context.Context) *RegionInstantSnapshotsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { ++func (c *RegionInstantSnapshotsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159752,14 +168207,14 @@ func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -159773,14 +168228,14 @@ func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Respons + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.insert" call. ++// Do executes the "compute.regionInstantSnapshots.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159811,10 +168266,10 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "description": "Creates an instant snapshot in the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceTemplates.insert", ++ // "id": "compute.regionInstantSnapshots.insert", + // "parameterOrder": [ + // "project", + // "region" +@@ -159828,7 +168283,7 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -159840,9 +168295,9 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots", + // "request": { +- // "$ref": "InstanceTemplate" ++ // "$ref": "InstantSnapshot" + // }, + // "response": { + // "$ref": "Operation" +@@ -159855,9 +168310,9 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.regionInstanceTemplates.list": ++// method id "compute.regionInstantSnapshots.list": + +-type RegionInstanceTemplatesListCall struct { ++type RegionInstantSnapshotsListCall struct { + s *Service + project string + region string +@@ -159867,13 +168322,13 @@ type RegionInstanceTemplatesListCall struct { + header_ http.Header + } + +-// List: Retrieves a list of instance templates that are contained +-// within the specified project and region. ++// List: Retrieves the list of InstantSnapshot resources contained ++// within the specified region. + // + // - project: Project ID for this request. +-// - region: The name of the regions for this request. +-func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { +- c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstantSnapshotsService) List(project string, region string) *RegionInstantSnapshotsListCall { ++ c := &RegionInstantSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +@@ -159914,7 +168369,7 @@ func (r *RegionInstanceTemplatesService) List(project string, region string) *Re + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. 
+-func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Filter(filter string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -159925,7 +168380,7 @@ func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceT + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) MaxResults(maxResults int64) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -159939,7 +168394,7 @@ func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionIn + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) OrderBy(orderBy string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -159947,7 +168402,7 @@ func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanc + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) PageToken(pageToken string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -159956,7 +168411,7 @@ func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionIns + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -159964,7 +168419,7 @@ func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSucc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159974,7 +168429,7 @@ func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionIn + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -159982,21 +168437,21 @@ func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionI + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Context(ctx context.Context) *RegionInstantSnapshotsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesListCall) Header() http.Header { ++func (c *RegionInstantSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -160009,7 +168464,7 @@ func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -160023,14 +168478,14 @@ func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.list" call. +-// Exactly one of *InstanceTemplateList or error will be non-nil. Any ++// Do executes the "compute.regionInstantSnapshots.list" call. ++// Exactly one of *InstantSnapshotList or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *InstanceTemplateList.ServerResponse.Header or (if a response was ++// *InstantSnapshotList.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { ++func (c *RegionInstantSnapshotsListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -160049,7 +168504,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceTemplateList{ ++ ret := &InstantSnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -160061,10 +168516,10 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + } + return ret, nil + // { +- // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "description": "Retrieves the list of InstantSnapshot resources contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceTemplates.list", ++ // "id": "compute.regionInstantSnapshots.list", + // "parameterOrder": [ + // "project", + // "region" +@@ -160101,7 +168556,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // "type": "string" + // }, + // "region": { +- // "description": "The name of the regions for this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -160113,9 +168568,9 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots", + // "response": { +- // "$ref": "InstanceTemplateList" ++ // "$ref": "InstantSnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -160129,7 +168584,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { ++func (c *RegionInstantSnapshotsListCall) Pages(ctx context.Context, f func(*InstantSnapshotList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -160147,28 +168602,200 @@ func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*Ins + } + } + +-// method id "compute.regionInstances.bulkInsert": ++// method id "compute.regionInstantSnapshots.setIamPolicy": + +-type RegionInstancesBulkInsertCall struct { +- s *Service +- project string +- region string +- bulkinsertinstanceresource *BulkInsertInstanceResource +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// BulkInsert: Creates multiple instances in a given region. Count +-// specifies the number of instances to create. 
++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. +-func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { +- c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionInstantSnapshotsSetIamPolicyCall { ++ c := &RegionInstantSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.bulkinsertinstanceresource = bulkinsertinstanceresource ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Context(ctx context.Context) *RegionInstantSnapshotsSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. 
++func (c *RegionInstantSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstantSnapshots.setLabels": ++ ++type RegionInstantSnapshotsSetLabelsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetlabelsrequest *RegionSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetLabels: Sets the labels on a instantSnapshot in the given region. ++// To learn more about labels, read the Labeling Resources ++// documentation. ++// ++// - project: Project ID for this request. ++// - region: The region for this request. ++// - resource: Name or id of the resource for this request. 
++func (r *RegionInstantSnapshotsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionInstantSnapshotsSetLabelsCall { ++ c := &RegionInstantSnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.regionsetlabelsrequest = regionsetlabelsrequest + return c + } + +@@ -160183,7 +168810,7 @@ func (r *RegionInstancesService) BulkInsert(project string, region string, bulki + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) RequestId(requestId string) *RegionInstantSnapshotsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -160191,7 +168818,7 @@ func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInsta + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -160199,21 +168826,21 @@ func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInst + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) Context(ctx context.Context) *RegionInstantSnapshotsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstancesBulkInsertCall) Header() http.Header { ++func (c *RegionInstantSnapshotsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -160221,14 +168848,14 @@ func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, e + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -160236,20 +168863,21 @@ func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, e + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstances.bulkInsert" call. ++// Do executes the "compute.regionInstantSnapshots.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -160280,13 +168908,14 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + } + return ret, nil + // { +- // "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", +- // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", ++ // "description": "Sets the labels on a instantSnapshot in the given region. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.regionInstances.bulkInsert", ++ // "id": "compute.regionInstantSnapshots.setLabels", + // "parameterOrder": [ + // "project", +- // "region" ++ // "region", ++ // "resource" + // ], + // "parameters": { + // "project": { +@@ -160297,7 +168926,7 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -160307,11 +168936,18 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instances/bulkInsert", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", + // "request": { +- // "$ref": "BulkInsertInstanceResource" ++ // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -160324,6 +168960,175 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + + } + ++// method id "compute.regionInstantSnapshots.testIamPermissions": ++ ++type RegionInstantSnapshotsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c := &RegionInstantSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionNetworkEndpointGroups.delete": + + type RegionNetworkEndpointGroupsDeleteCall struct { +@@ -166167,6 +174972,185 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.regionSecurityPolicies.addRule": ++ ++type RegionSecurityPoliciesAddRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ securitypolicyrule *SecurityPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddRule: Inserts a rule into a security policy. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
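++//
++// Illustrative sketch (not produced by the API code generator): assuming
++// svc is an authenticated *compute.Service, a deny rule could be appended
++// to a regional security policy roughly as follows; the matcher fields
++// follow the SecurityPolicyRule schema and all names, priorities and IP
++// ranges are placeholders:
++//
++//    rule := &compute.SecurityPolicyRule{
++//        Priority: 1000,
++//        Action:   "deny(403)",
++//        Match: &compute.SecurityPolicyRuleMatcher{
++//            VersionedExpr: "SRC_IPS_V1",
++//            Config: &compute.SecurityPolicyRuleMatcherConfig{
++//                SrcIpRanges: []string{"203.0.113.0/24"},
++//            },
++//        },
++//    }
++//    op, err := svc.RegionSecurityPolicies.AddRule(
++//        "my-project", "us-central1", "my-policy", rule,
++//    ).Context(ctx).Do()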
++func (r *RegionSecurityPoliciesService) AddRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesAddRuleCall { ++ c := &RegionSecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ c.securitypolicyrule = securitypolicyrule ++ return c ++} ++ ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *RegionSecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesAddRuleCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesAddRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesAddRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesAddRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesAddRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.addRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts a rule into a security policy.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.addRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ // "request": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSecurityPolicies.delete": + + type RegionSecurityPoliciesDeleteCall struct { +@@ -166518,6 +175502,192 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur + + } + ++// method id "compute.regionSecurityPolicies.getRule": ++ ++type RegionSecurityPoliciesGetRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetRule: Gets a rule at the specified priority. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to which the queried ++// rule belongs. 
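++//
++// Illustrative sketch (not produced by the API code generator): with an
++// authenticated *compute.Service, a single rule can be read back by
++// priority; the names and priority are placeholders:
++//
++//    rule, err := svc.RegionSecurityPolicies.GetRule(
++//        "my-project", "us-central1", "my-policy",
++//    ).Priority(1000).Context(ctx).Do()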
++func (r *RegionSecurityPoliciesService) GetRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetRuleCall { ++ c := &RegionSecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to get from the security policy. ++func (c *RegionSecurityPoliciesGetRuleCall) Priority(priority int64) *RegionSecurityPoliciesGetRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionSecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetRuleCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesGetRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesGetRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesGetRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.getRule" call. ++// Exactly one of *SecurityPolicyRule or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *SecurityPolicyRule.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. 
++func (c *RegionSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &SecurityPolicyRule{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets a rule at the specified priority.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ // "httpMethod": "GET", ++ // "id": "compute.regionSecurityPolicies.getRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to get from the security policy.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to which the queried rule belongs.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ // "response": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSecurityPolicies.insert": + + type RegionSecurityPoliciesInsertCall struct { +@@ -167191,6 +176361,370 @@ func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Ope + + } + ++// method id "compute.regionSecurityPolicies.patchRule": ++ ++type RegionSecurityPoliciesPatchRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ securitypolicyrule *SecurityPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchRule: Patches a rule at the specified priority. To clear fields ++// in the rule, leave the fields empty and specify them in the ++// updateMask. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
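++//
++// Illustrative sketch (not produced by the API code generator): with an
++// authenticated *compute.Service, the rule at a given priority can be
++// patched in place; the names, priority and replacement action are
++// placeholders:
++//
++//    op, err := svc.RegionSecurityPolicies.PatchRule(
++//        "my-project", "us-central1", "my-policy",
++//        &compute.SecurityPolicyRule{Action: "allow"},
++//    ).Priority(1000).Context(ctx).Do()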
++func (r *RegionSecurityPoliciesService) PatchRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesPatchRuleCall { ++ c := &RegionSecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ c.securitypolicyrule = securitypolicyrule ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to patch. ++func (c *RegionSecurityPoliciesPatchRuleCall) Priority(priority int64) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *RegionSecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesPatchRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesPatchRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.patchRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *RegionSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.patchRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to patch.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ // "request": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionSecurityPolicies.removeRule": ++ ++type RegionSecurityPoliciesRemoveRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// RemoveRule: Deletes a rule at the specified priority. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
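++//
++// Illustrative sketch (not produced by the API code generator): with an
++// authenticated *compute.Service, the rule at a given priority can be
++// deleted from the policy; the names and priority are placeholders:
++//
++//    op, err := svc.RegionSecurityPolicies.RemoveRule(
++//        "my-project", "us-central1", "my-policy",
++//    ).Priority(1000).Context(ctx).Do()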
++func (r *RegionSecurityPoliciesService) RemoveRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesRemoveRuleCall { ++ c := &RegionSecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to remove from the security policy. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Priority(priority int64) *RegionSecurityPoliciesRemoveRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesRemoveRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesRemoveRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.removeRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes a rule at the specified priority.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.removeRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to remove from the security policy.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSslCertificates.delete": + + type RegionSslCertificatesDeleteCall struct { +@@ -199923,6 +209457,196 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta + } + } + ++// method id "compute.targetInstances.setSecurityPolicy": ++ ++type TargetInstancesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ targetInstance string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target instance. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - targetInstance: Name of the TargetInstance resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - zone: Name of the zone scoping this request. 
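++//
++// Illustrative sketch (not produced by the API code generator): with an
++// authenticated *compute.Service, a Cloud Armor policy can be attached to
++// a target instance; the project, zone, target instance name and the
++// policySelfLink value are placeholders:
++//
++//    op, err := svc.TargetInstances.SetSecurityPolicy(
++//        "my-project", "us-central1-a", "my-target-instance",
++//        &compute.SecurityPolicyReference{SecurityPolicy: policySelfLink},
++//    ).Context(ctx).Do()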
++func (r *TargetInstancesService) SetSecurityPolicy(project string, zone string, targetInstance string, securitypolicyreference *SecurityPolicyReference) *TargetInstancesSetSecurityPolicyCall { ++ c := &TargetInstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.targetInstance = targetInstance ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetInstancesSetSecurityPolicyCall) RequestId(requestId string) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetInstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *TargetInstancesSetSecurityPolicyCall) Context(ctx context.Context) *TargetInstancesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetInstancesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetInstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "targetInstance": c.targetInstance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetInstances.setSecurityPolicy" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetInstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetInstances.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "targetInstance" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetInstance": { ++ // "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "Name of the zone scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetInstances.testIamPermissions": + + type TargetInstancesTestIamPermissionsCall struct { +@@ -202333,6 +212057,196 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.targetPools.setSecurityPolicy": ++ ++type TargetPoolsSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ targetPool string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target pool. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - targetPool: Name of the TargetPool resource to which the security ++// policy should be set. The name should conform to RFC1035. ++func (r *TargetPoolsService) SetSecurityPolicy(project string, region string, targetPool string, securitypolicyreference *SecurityPolicyReference) *TargetPoolsSetSecurityPolicyCall { ++ c := &TargetPoolsSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.targetPool = targetPool ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetPoolsSetSecurityPolicyCall) RequestId(requestId string) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetPoolsSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
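++//
++// Illustrative sketch (not produced by the API code generator): the full
++// call chain for compute.targetPools.setSecurityPolicy, with an
++// authenticated *compute.Service and placeholder project, region, target
++// pool and policySelfLink values, might look like:
++//
++//    op, err := svc.TargetPools.SetSecurityPolicy(
++//        "my-project", "us-central1", "my-target-pool",
++//        &compute.SecurityPolicyReference{SecurityPolicy: policySelfLink},
++//    ).Context(ctx).Do()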
++func (c *TargetPoolsSetSecurityPolicyCall) Context(ctx context.Context) *TargetPoolsSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetPoolsSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetPoolsSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "targetPool": c.targetPool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetPools.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetPoolsSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetPools.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "targetPool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetPool": { ++ // "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetPools.testIamPermissions": + + type TargetPoolsTestIamPermissionsCall struct { +diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json +index b380f1a7034..4ef0af54620 100644 +--- a/vendor/google.golang.org/api/compute/v1/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v1/compute-api.json +@@ -550,6 +550,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource.", ++ "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.addresses.move", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ 
"region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "request": { ++ "$ref": "RegionAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", +@@ -4497,6 +4547,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource from one project to another project.", ++ "flatPath": "projects/{project}/global/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.globalAddresses.move", ++ "parameterOrder": [ ++ "project", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/addresses/{address}/move", ++ "request": { ++ "$ref": "GlobalAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", +@@ -10647,6 +10739,11 @@ + "required": true, + "type": "string" + }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", +@@ -11669,6 +11766,100 @@ + } + } + }, ++ "interconnectRemoteLocations": { ++ "methods": { ++ "get": { ++ "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.get", ++ "parameterOrder": [ ++ "project", ++ "interconnectRemoteLocation" ++ ], ++ "parameters": { ++ "interconnectRemoteLocation": { ++ "description": "Name of the interconnect remote location to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "response": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectRemoteLocations", ++ "response": { ++ "$ref": "InterconnectRemoteLocationList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, + "interconnects": { + "methods": { + "delete": { +@@ -15712,6 +15903,56 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "simulateMaintenanceEvent": { ++ "description": "Simulates maintenance event on specified nodes from the node group.", ++ "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ "httpMethod": "POST", ++ "id": "compute.nodeGroups.simulateMaintenanceEvent", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "nodeGroup" ++ ], ++ "parameters": { ++ "nodeGroup": { ++ "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ "request": { ++ "$ref": "NodeGroupsSimulateMaintenanceEventRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", +@@ -25956,6 +26197,11 @@ + "minimum": "0", + "type": "integer" + }, ++ "natName": { ++ "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", ++ "location": "query", ++ "type": "string" ++ }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", +@@ -33123,7 +33369,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AcceleratorConfig": { +@@ -33543,11 +33789,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -33557,11 +33803,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -33589,8 +33835,7 @@ + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -33670,6 +33915,18 @@ + "description": "[Output Only] Type of the resource. 
Always compute#address for addresses.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Address.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "annotations": { + "required": [ +@@ -34357,6 +34614,18 @@ + ], + "type": "string" + }, ++ "savedState": { ++ "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", ++ "enum": [ ++ "DISK_SAVED_STATE_UNSPECIFIED", ++ "PRESERVED" ++ ], ++ "enumDescriptions": [ ++ "*[Default]* Disk state has not been preserved.", ++ "Disk state has been preserved." ++ ], ++ "type": "string" ++ }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" +@@ -34448,6 +34717,18 @@ + "format": "int64", + "type": "string" + }, ++ "provisionedThroughput": { ++ "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "replicaZones": { ++ "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" +@@ -35067,7 +35348,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. 
The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -35097,7 +35378,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -35806,6 +36087,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -37095,7 +37383,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -37136,6 +37424,7 @@ + "GENERAL_PURPOSE_N2", + "GENERAL_PURPOSE_N2D", + "GENERAL_PURPOSE_T2D", ++ "GRAPHICS_OPTIMIZED", + "MEMORY_OPTIMIZED", + "MEMORY_OPTIMIZED_M3", + "TYPE_UNSPECIFIED" +@@ -37152,6 +37441,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -37802,6 +38092,17 @@ + ], + "type": "string" + }, ++ "asyncPrimaryDisk": { ++ "$ref": "DiskAsyncReplication", ++ "description": "Disk asynchronously replicated into this disk." ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskAsyncReplicationList" ++ }, ++ "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", ++ "type": "object" ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -37898,6 +38199,11 @@ + "format": "int64", + "type": "string" + }, ++ "provisionedThroughput": { ++ "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. 
Values must be between 1 and 7,124.", ++ "format": "int64", ++ "type": "string" ++ }, + "region": { + "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -37916,6 +38222,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "DiskResourceStatus", ++ "description": "[Output Only] Status information for the disk resource." ++ }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" +@@ -37929,6 +38239,14 @@ + "format": "int64", + "type": "string" + }, ++ "sourceConsistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, ++ "sourceConsistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" +@@ -38128,6 +38446,37 @@ + }, + "type": "object" + }, ++ "DiskAsyncReplication": { ++ "id": "DiskAsyncReplication", ++ "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "disk": { ++ "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", ++ "type": "string" ++ }, ++ "diskId": { ++ "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. 
For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskAsyncReplicationList": { ++ "id": "DiskAsyncReplicationList", ++ "properties": { ++ "asyncReplicationDisk": { ++ "$ref": "DiskAsyncReplication" ++ } ++ }, ++ "type": "object" ++ }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", +@@ -38317,6 +38666,47 @@ + }, + "type": "object" + }, ++ "DiskResourceStatus": { ++ "id": "DiskResourceStatus", ++ "properties": { ++ "asyncPrimaryDisk": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "description": "Key: disk, value: AsyncReplicationStatus message", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskResourceStatusAsyncReplicationStatus": { ++ "id": "DiskResourceStatusAsyncReplicationStatus", ++ "properties": { ++ "state": { ++ "enum": [ ++ "ACTIVE", ++ "CREATED", ++ "STARTING", ++ "STATE_UNSPECIFIED", ++ "STOPPED", ++ "STOPPING" ++ ], ++ "enumDescriptions": [ ++ "Replication is active.", ++ "Secondary disk is created and is waiting for replication to start.", ++ "Replication is starting.", ++ "", ++ "Replication is stopped.", ++ "Replication is stopping." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DiskType": { + "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", + "id": "DiskType", +@@ -39982,6 +40372,20 @@ + "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "id": "FirewallPolicyRuleMatcher", + "properties": { ++ "destAddressGroups": { ++ "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destFqdns": { ++ "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "destIpRanges": { + "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", + "items": { +@@ -39989,6 +40393,20 @@ + }, + "type": "array" + }, ++ "destRegionCodes": { ++ "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. 
ex.\"US\" Maximum number of dest region codes allowed is 5000.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destThreatIntelligences": { ++ "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "layer4Configs": { + "description": "Pairs of IP protocols and ports that the rule should match.", + "items": { +@@ -39996,6 +40414,20 @@ + }, + "type": "array" + }, ++ "srcAddressGroups": { ++ "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcFqdns": { ++ "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "srcIpRanges": { + "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", + "items": { +@@ -40003,12 +40435,26 @@ + }, + "type": "array" + }, ++ "srcRegionCodes": { ++ "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of source region codes allowed is 5000.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "srcSecureTags": { + "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", + "items": { + "$ref": "FirewallPolicyRuleSecureTag" + }, + "type": "array" ++ }, ++ "srcThreatIntelligences": { ++ "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -40113,6 +40559,10 @@ + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "type": "boolean" + }, ++ "allowPscGlobalAccess": { ++ "description": "This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.", ++ "type": "boolean" ++ }, + "backendService": { + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "type": "string" +@@ -40207,7 +40657,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. 
For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -40717,6 +41167,20 @@ + }, + "type": "object" + }, ++ "GlobalAddressesMoveRequest": { ++ "id": "GlobalAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { +@@ -40874,13 +41338,14 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "SEV_CAPABLE", ++ "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", +@@ -40895,6 +41360,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -41062,7 +41528,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). 
For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -41757,7 +42223,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -41830,10 +42296,10 @@ + "UNKNOWN" + ], + "enumDescriptions": [ +- "", +- "", +- "", +- "" ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." + ], + "type": "string" + } +@@ -42416,6 +42882,10 @@ + }, + "type": "array" + }, ++ "pathTemplateMatch": { ++ "description": "If specified, the route is a pattern match expression that must match the :path header once the query string is removed. A pattern match allows you to match - The value must be between 1 and 1024 characters - The pattern must start with a leading slash (\"/\") - There may be no more than 5 operators in pattern Precisely one of prefix_match, full_path_match, regex_match or path_template_match must be set.", ++ "type": "string" ++ }, + "prefixMatch": { + "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" +@@ -43218,7 +43688,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -44080,7 +44550,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. 
During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "format": "int32", + "type": "integer" + } +@@ -44444,7 +44914,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -45986,7 +46456,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -46303,7 +46773,7 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { +@@ -46375,6 +46845,18 @@ + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "linkType": { + "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. 
Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", + "enum": [ +@@ -46426,6 +46908,10 @@ + "format": "int32", + "type": "integer" + }, ++ "remoteLocation": { ++ "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", ++ "type": "string" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -46520,6 +47006,10 @@ + "description": "This field is not available.", + "type": "string" + }, ++ "configurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Constraints for this attachment, if any. The attachment does not work if these constraints are not met." ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -46585,7 +47075,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -46596,6 +47086,18 @@ + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "mtu": { + "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", + "format": "int32", +@@ -46639,6 +47141,10 @@ + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, ++ "remoteService": { ++ "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: \"Amazon Web Services\" \"Microsoft Azure\". The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", ++ "type": "string" ++ }, + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "type": "string" +@@ -46685,6 +47191,11 @@ + ], + "type": "string" + }, ++ "subnetLength": { ++ "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", ++ "format": "int32", ++ "type": "integer" ++ }, + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", + "enum": [ +@@ -46834,6 +47345,47 @@ + }, + "type": "object" + }, ++ "InterconnectAttachmentConfigurationConstraints": { ++ "id": "InterconnectAttachmentConfigurationConstraints", ++ "properties": { ++ "bgpMd5": { ++ "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. 
Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", ++ "enum": [ ++ "MD5_OPTIONAL", ++ "MD5_REQUIRED", ++ "MD5_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", ++ "MD5_REQUIRED: BGP MD5 authentication must be configured.", ++ "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" ++ ], ++ "type": "string" ++ }, ++ "bgpPeerAsnRanges": { ++ "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. Although the API accepts other ranges, these are the ranges that we recommend.", ++ "items": { ++ "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { ++ "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", ++ "properties": { ++ "max": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "uint32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", +@@ -47727,6 +48279,308 @@ + }, + "type": "object" + }, ++ "InterconnectRemoteLocation": { ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "id": "InterconnectRemoteLocation", ++ "properties": { ++ "address": { ++ "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", ++ "type": "string" ++ }, ++ "attachmentConfigurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." ++ }, ++ "city": { ++ "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", ++ "type": "string" ++ }, ++ "constraints": { ++ "$ref": "InterconnectRemoteLocationConstraints", ++ "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments." 
++ }, ++ "continent": { ++ "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", ++ "enum": [ ++ "AFRICA", ++ "ASIA_PAC", ++ "EUROPE", ++ "NORTH_AMERICA", ++ "SOUTH_AMERICA" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "[Output Only] An optional description of the resource.", ++ "type": "string" ++ }, ++ "facilityProvider": { ++ "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", ++ "type": "string" ++ }, ++ "facilityProviderFacilityId": { ++ "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocation", ++ "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", ++ "type": "string" ++ }, ++ "lacp": { ++ "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", ++ "enum": [ ++ "LACP_SUPPORTED", ++ "LACP_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ ], ++ "type": "string" ++ }, ++ "maxLagSize100Gbps": { ++ "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "maxLagSize10Gbps": { ++ "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "name": { ++ "description": "[Output Only] Name of the resource.", ++ "type": "string" ++ }, ++ "peeringdbFacilityId": { ++ "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", ++ "type": "string" ++ }, ++ "permittedConnections": { ++ "description": "[Output Only] Permitted connections.", ++ "items": { ++ "$ref": "InterconnectRemoteLocationPermittedConnections" ++ }, ++ "type": "array" ++ }, ++ "remoteService": { ++ "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects. 
- AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", ++ "enum": [ ++ "AVAILABLE", ++ "CLOSED" ++ ], ++ "enumDescriptions": [ ++ "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", ++ "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraints": { ++ "id": "InterconnectRemoteLocationConstraints", ++ "properties": { ++ "portPairRemoteLocation": { ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", ++ "enum": [ ++ "PORT_PAIR_MATCHING_REMOTE_LOCATION", ++ "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", ++ "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." ++ ], ++ "type": "string" ++ }, ++ "portPairVlan": { ++ "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", ++ "enum": [ ++ "PORT_PAIR_MATCHING_VLAN", ++ "PORT_PAIR_UNCONSTRAINED_VLAN" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", ++ "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." ++ ], ++ "type": "string" ++ }, ++ "subnetLengthRange": { ++ "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. 
" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraintsSubnetLengthRange": { ++ "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "properties": { ++ "max": { ++ "format": "int32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationList": { ++ "description": "Response to the list request, and contains a list of interconnect remote locations.", ++ "id": "InterconnectRemoteLocationList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InterconnectRemoteLocation resources.", ++ "items": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocationList", ++ "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. 
Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", ++ "properties": { ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectsGetDiagnosticsResponse": { + "description": "Response for the InterconnectsGetDiagnosticsRequest.", + "id": "InterconnectsGetDiagnosticsResponse", +@@ -48464,7 +49318,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 
'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -48971,7 +49825,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -49236,7 +50090,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -49343,7 +50197,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -49368,7 +50222,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -49532,7 +50386,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -49540,7 +50394,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -50919,7 +51773,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. 
This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -52008,6 +52862,19 @@ + }, + "type": "object" + }, ++ "NodeGroupsSimulateMaintenanceEventRequest": { ++ "id": "NodeGroupsSimulateMaintenanceEventRequest", ++ "properties": { ++ "nodes": { ++ "description": "Names of the nodes to go under maintenance simulation.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "NodeTemplate": { + "description": "Represent a sole-tenant Node Template resource. You can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances.", + "id": "NodeTemplate", +@@ -55474,6 +56341,7 @@ + "COMMITTED_NVIDIA_A100_80GB_GPUS", + "COMMITTED_NVIDIA_A100_GPUS", + "COMMITTED_NVIDIA_K80_GPUS", ++ "COMMITTED_NVIDIA_L4_GPUS", + "COMMITTED_NVIDIA_P100_GPUS", + "COMMITTED_NVIDIA_P4_GPUS", + "COMMITTED_NVIDIA_T4_GPUS", +@@ -55525,11 +56393,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -55544,6 +56416,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -55567,6 +56440,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -55624,6 +56498,7 @@ + "", + "", + "", ++ "", + "Guest CPUs", + "", + "", +@@ -55715,6 +56590,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -55868,6 +56749,20 @@ + }, + "type": "object" + }, ++ "RegionAddressesMoveRequest": { ++ "id": "RegionAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. So /regions/region/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionAutoscalerList": { + "description": "Contains a list of autoscalers.", + "id": "RegionAutoscalerList", +@@ -59465,7 +60360,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. 
- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -60675,6 +61570,18 @@ + "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -60727,15 +61634,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -60930,7 +61837,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -60950,7 +61857,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -60959,7 +61866,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." 
+ }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -60989,7 +61896,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + } + }, + "type": "object" +@@ -61031,7 +61938,7 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -61185,17 +62092,24 @@ + ], + "type": "string" + }, ++ "enforceOnKeyConfigs": { ++ "description": "If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must not be specified.", ++ "items": { ++ "$ref": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig" ++ }, ++ "type": "array" ++ }, + "enforceOnKeyName": { + "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -61204,6 +62118,40 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig": { ++ "id": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig", ++ "properties": { ++ "enforceOnKeyName": { ++ "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", ++ "type": "string" ++ }, ++ "enforceOnKeyType": { ++ "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", ++ "enum": [ ++ "ALL", ++ "HTTP_COOKIE", ++ "HTTP_HEADER", ++ "HTTP_PATH", ++ "IP", ++ "REGION_CODE", ++ "SNI", ++ "XFF_IP" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRuleRateLimitOptionsThreshold": { + "id": "SecurityPolicyRuleRateLimitOptionsThreshold", + "properties": { +@@ -61247,11 +62195,11 @@ + "id": "SecuritySettings", + "properties": { + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "subjectAltNames": { +- "description": "Optional. 
A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -61328,7 +62276,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -61425,6 +62373,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. 
You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -62059,6 +63011,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -63499,7 +64452,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "externalIpv6Prefix": { +@@ -63581,7 +64534,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -63603,7 +64556,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. 
This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -63924,7 +64877,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -64775,7 +65728,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -64823,7 +65776,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -64881,7 +65834,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -66080,7 +67033,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. 
Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -66122,7 +67075,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -66766,6 +67719,18 @@ + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "annotations": { + "required": [ +@@ -67821,6 +68786,10 @@ + "pathPrefixRewrite": { + "description": "Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters.", + "type": "string" ++ }, ++ "pathTemplateRewrite": { ++ "description": " If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. A corresponding path_template_match must be specified. Any template variables must exist in the path_template_match field. - -At least one variable must be specified in the path_template_match field - You can omit variables from the rewritten URL - The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. For example, a path_template_match of /static/{format=**} could be rewritten as /static/content/{format} to prefix /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as /content/{format}/{country}/{suffix}. At least one non-empty routeRules[].matchRules[].path_template_match is required. Only one of path_prefix_rewrite or path_template_rewrite may be specified.", ++ "type": "string" + } + }, + "type": "object" +@@ -67858,7 +68827,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. 
A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -67876,7 +68845,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -68663,7 +69632,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -68675,7 +69644,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +@@ -68862,6 +69831,18 @@ + "description": "[Output Only] Type of resource. 
Always compute#vpnTunnel for VPN tunnels.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this VpnTunnel, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnTunnel.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "localTrafficSelector": { + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", + "items": { +diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go +index c30ae0d4e1d..47ec0a57f3a 100644 +--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:v1" + const apiName = "compute" +@@ -172,6 +173,7 @@ func New(client *http.Client) (*Service, error) { + s.Instances = NewInstancesService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) ++ s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) + s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) + s.Licenses = NewLicensesService(s) +@@ -299,6 +301,8 @@ type Service struct { + + InterconnectLocations *InterconnectLocationsService + ++ InterconnectRemoteLocations *InterconnectRemoteLocationsService ++ + Interconnects *InterconnectsService + + LicenseCodes *LicenseCodesService +@@ -685,6 +689,15 @@ type InterconnectLocationsService struct { + s *Service + } + ++func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { ++ rs := &InterconnectRemoteLocationsService{s: s} ++ return rs ++} ++ ++type InterconnectRemoteLocationsService struct { ++ s *Service ++} ++ + func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +@@ -1917,32 +1930,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. 
++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. ++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -1978,12 +1994,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -2065,6 +2082,21 @@ type Address struct { + // addresses. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // Address, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. 
To see the latest fingerprint, make a get() ++ // request to retrieve an Address. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -3140,6 +3172,17 @@ type AttachedDisk struct { + // read-write mode. + Mode string `json:"mode,omitempty"` + ++ // SavedState: For LocalSSD disks on VM Instances in STOPPED or ++ // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data ++ // has been saved to a persistent location by customer request. (see the ++ // discard_local_ssd option on Stop/Suspend). Read-only in the api. ++ // ++ // Possible values: ++ // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not ++ // been preserved. ++ // "PRESERVED" - Disk state has been preserved. ++ SavedState string `json:"savedState,omitempty"` ++ + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` +@@ -3263,6 +3306,18 @@ type AttachedDiskInitializeParams struct { + // see the Extreme persistent disk documentation. + ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + ++ // ProvisionedThroughput: Indicates how much throughput to provision for ++ // the disk. This sets the number of throughput mb per second that the ++ // disk can handle. Values must be between 1 and 7,124. ++ ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` ++ ++ // ReplicaZones: Required for each regional disk associated with the ++ // instance. Specify the URLs of the zones where the disk should be ++ // replicated to. You must provide exactly two replica zones, and one ++ // zone must be the same as the instance zone. You can't use this option ++ // with boot disks. ++ ReplicaZones []string `json:"replicaZones,omitempty"` ++ + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values +@@ -4262,15 +4317,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). 
++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4298,7 +4355,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -5591,6 +5653,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -7685,7 +7751,7 @@ type Commitment struct { + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -7726,6 +7792,7 @@ type Commitment struct { + // "GENERAL_PURPOSE_N2" + // "GENERAL_PURPOSE_N2D" + // "GENERAL_PURPOSE_T2D" ++ // "GRAPHICS_OPTIMIZED" + // "MEMORY_OPTIMIZED" + // "MEMORY_OPTIMIZED_M3" + // "TYPE_UNSPECIFIED" +@@ -8773,6 +8840,13 @@ type Disk struct { + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + ++ // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. ++ AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: [Output Only] A list of disks this disk is ++ // asynchronously replicated to. ++ AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -8877,6 +8951,11 @@ type Disk struct { + // see the Extreme persistent disk documentation. + ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + ++ // ProvisionedThroughput: Indicates how much throughput to provision for ++ // the disk. 
This sets the number of throughput mb per second that the ++ // disk can handle. Values must be between 1 and 7,124. ++ ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` ++ + // Region: [Output Only] URL of the region where the disk resides. Only + // applicable for regional resources. You must specify this field as + // part of the HTTP request URL. It is not settable as a field in the +@@ -8891,6 +8970,10 @@ type Disk struct { + // automatic snapshot creations. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for the disk ++ // resource. ++ ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` ++ + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + +@@ -8906,6 +8989,16 @@ type Disk struct { + // source. Acceptable values are 1 to 65536, inclusive. + SizeGb int64 `json:"sizeGb,omitempty,string"` + ++ // SourceConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // SourceConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` ++ + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: - +@@ -9230,6 +9323,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ ++ // Disk: The other disk asynchronously replicated to or from the current ++ // disk. You can provide this as a partial or full URL to the resource. ++ // For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // zones/zone/disks/disk ++ Disk string `json:"disk,omitempty"` ++ ++ // DiskId: [Output Only] The unique ID of the other disk asynchronously ++ // replicated to or from the current disk. This value identifies the ++ // exact disk that was used to create this replication. For example, if ++ // you started replicating the persistent disk from a disk that was ++ // later deleted and recreated under the same name, the disk ID would ++ // identify the exact version of the disk that was used. ++ DiskId string `json:"diskId,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. 
However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplication ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskAsyncReplicationList struct { ++ AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AsyncReplicationDisk") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplicationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskInstantiationConfig: A specification of the desired way to + // instantiate a disk in the instance template when its created from a + // source instance. +@@ -9571,6 +9744,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskResourceStatus struct { ++ AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message ++ AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // include in API requests with the JSON null value. 
By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskResourceStatusAsyncReplicationStatus struct { ++ // Possible values: ++ // "ACTIVE" - Replication is active. ++ // "CREATED" - Secondary disk is created and is waiting for ++ // replication to start. ++ // "STARTING" - Replication is starting. ++ // "STATE_UNSPECIFIED" ++ // "STOPPED" - Replication is stopped. ++ // "STOPPING" - Replication is stopping. ++ State string `json:"state,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "State") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "State") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatusAsyncReplicationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskType: Represents a Disk Type resource. Google Compute Engine has + // two Disk Type resources: * Regional + // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal +@@ -12303,38 +12540,77 @@ func (s *FirewallPolicyRule) MarshalJSON() ([]byte, error) { + // FirewallPolicyRuleMatcher: Represents a match condition that incoming + // traffic is evaluated against. Exactly one field must be specified. + type FirewallPolicyRuleMatcher struct { ++ // DestAddressGroups: Address groups which should be matched against the ++ // traffic destination. Maximum number of destination address groups is ++ // 10. ++ DestAddressGroups []string `json:"destAddressGroups,omitempty"` ++ ++ // DestFqdns: Fully Qualified Domain Name (FQDN) which should be matched ++ // against traffic destination. Maximum number of destination fqdn ++ // allowed is 100. ++ DestFqdns []string `json:"destFqdns,omitempty"` ++ + // DestIpRanges: CIDR IP address range. Maximum number of destination + // CIDR IP ranges allowed is 5000. + DestIpRanges []string `json:"destIpRanges,omitempty"` + ++ // DestRegionCodes: Region codes whose IP addresses will be used to ++ // match for destination of traffic. Should be specified as 2 letter ++ // country code defined as per ISO 3166 alpha-2 country codes. ex."US" ++ // Maximum number of dest region codes allowed is 5000. 
++ DestRegionCodes []string `json:"destRegionCodes,omitempty"` ++ ++ // DestThreatIntelligences: Names of Network Threat Intelligence lists. ++ // The IPs in these lists will be matched against traffic destination. ++ DestThreatIntelligences []string `json:"destThreatIntelligences,omitempty"` ++ + // Layer4Configs: Pairs of IP protocols and ports that the rule should + // match. + Layer4Configs []*FirewallPolicyRuleMatcherLayer4Config `json:"layer4Configs,omitempty"` + ++ // SrcAddressGroups: Address groups which should be matched against the ++ // traffic source. Maximum number of source address groups is 10. ++ SrcAddressGroups []string `json:"srcAddressGroups,omitempty"` ++ ++ // SrcFqdns: Fully Qualified Domain Name (FQDN) which should be matched ++ // against traffic source. Maximum number of source fqdn allowed is 100. ++ SrcFqdns []string `json:"srcFqdns,omitempty"` ++ + // SrcIpRanges: CIDR IP address range. Maximum number of source CIDR IP + // ranges allowed is 5000. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + ++ // SrcRegionCodes: Region codes whose IP addresses will be used to match ++ // for source of traffic. Should be specified as 2 letter country code ++ // defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number ++ // of source region codes allowed is 5000. ++ SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` ++ + // SrcSecureTags: List of secure tag values, which should be matched at + // the source of the traffic. For INGRESS rule, if all the srcSecureTag + // are INEFFECTIVE, and there is no srcIpRange, this rule will be + // ignored. Maximum number of source tag values allowed is 256. + SrcSecureTags []*FirewallPolicyRuleSecureTag `json:"srcSecureTags,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DestIpRanges") to +- // unconditionally include in API requests. By default, fields with ++ // SrcThreatIntelligences: Names of Network Threat Intelligence lists. ++ // The IPs in these lists will be matched against traffic source. ++ SrcThreatIntelligences []string `json:"srcThreatIntelligences,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "DestAddressGroups") ++ // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DestIpRanges") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "DestAddressGroups") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
+ NullFields []string `json:"-"` + } + +@@ -12533,6 +12809,10 @@ type ForwardingRule struct { + // clients in the same region as the internal load balancer. + AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + ++ // AllowPscGlobalAccess: This is used in PSC consumer ForwardingRule to ++ // control whether the PSC endpoint can be accessed from another region. ++ AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` ++ + // BackendService: Identifies the backend service to which the + // forwarding rule sends traffic. Required for Internal TCP/UDP Load + // Balancing and Network Load Balancing; must be omitted for all other +@@ -12651,9 +12931,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -13511,6 +13792,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type GlobalAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project ++ // /global/addresses/address - projects/project/global/addresses/address ++ // Note that destination project must be different from the source ++ // project. So /global/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod GlobalAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +@@ -13800,8 +14118,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. + // + // Possible values: + // "FEATURE_TYPE_UNSPECIFIED" +@@ -13809,6 +14127,7 @@ type GuestOsFeature struct { + // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "SEV_CAPABLE" ++ // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" +@@ -14126,12 +14445,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) + // load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -15209,7 +15528,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -15294,10 +15613,10 @@ type HealthStatusForNetworkEndpoint struct { + // the health checks configured. + // + // Possible values: +- // "DRAINING" +- // "HEALTHY" +- // "UNHEALTHY" +- // "UNKNOWN" ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. + HealthState string `json:"healthState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to +@@ -16450,6 +16769,15 @@ type HttpRouteRuleMatch struct { + // validateForProxyless field set to true. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + ++ // PathTemplateMatch: If specified, the route is a pattern match ++ // expression that must match the :path header once the query string is ++ // removed. 
A pattern match allows you to match - The value must be ++ // between 1 and 1024 characters - The pattern must start with a leading ++ // slash ("/") - There may be no more than 5 operators in pattern ++ // Precisely one of prefix_match, full_path_match, regex_match or ++ // path_template_match must be set. ++ PathTemplateMatch string `json:"pathTemplateMatch,omitempty"` ++ + // PrefixMatch: For satisfying the matchRule condition, the request's + // path must begin with the specified prefixMatch. prefixMatch must + // begin with a /. The value must be from 1 to 1024 characters. Only one +@@ -17520,9 +17848,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -18775,13 +19103,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. + HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to +@@ -19452,7 +19781,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -22010,9 +22341,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. 
++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -22604,9 +22935,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. +@@ -22671,6 +23002,21 @@ type Interconnect struct { + // for interconnects. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // Interconnect, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve an Interconnect. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // LinkType: Type of link requested, which can take one of the following + // values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - + // LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that +@@ -22731,6 +23077,11 @@ type Interconnect struct { + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + ++ // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. ++ // This field specifies the location outside of Google's network that ++ // the interconnect is connected to. ++ RemoteLocation string `json:"remoteLocation,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -22846,6 +23197,11 @@ type InterconnectAttachment struct { + // CloudRouterIpv6InterfaceId: This field is not available. + CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + ++ // ConfigurationConstraints: [Output Only] Constraints for this ++ // attachment, if any. The attachment does not work if these constraints ++ // are not met. ++ ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -22938,14 +23294,28 @@ type InterconnectAttachment struct { + // attachment. 
If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectAttachment for interconnect attachments. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // InterconnectAttachment, which is essentially a hash of the labels set ++ // used for optimistic locking. The fingerprint is initially generated ++ // by Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve an InterconnectAttachment. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Mtu: Maximum Transmission Unit (MTU), in bytes, of packets passing + // through this interconnect attachment. Only 1440 and 1500 are allowed. + // If not specified, the value will default to 1440. +@@ -23001,6 +23371,14 @@ type InterconnectAttachment struct { + // body. + Region string `json:"region,omitempty"` + ++ // RemoteService: [Output Only] If the attachment is on a Cross-Cloud ++ // Interconnect connection, this field contains the interconnect's ++ // remote location service provider. Example values: "Amazon Web ++ // Services" "Microsoft Azure". The field is set only for attachments on ++ // Cross-Cloud Interconnect connections. Its value is copied from the ++ // InterconnectRemoteLocation remoteService field. ++ RemoteService string `json:"remoteService,omitempty"` ++ + // Router: URL of the Cloud Router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to +@@ -23064,6 +23442,16 @@ type InterconnectAttachment struct { + // yet, because turnup is not complete. + State string `json:"state,omitempty"` + ++ // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 ++ // (default) - 30 The default value is 29, except for Cross-Cloud ++ // Interconnect connections that use an InterconnectRemoteLocation with ++ // a constraints.subnetLengthRange.min equal to 30. For example, ++ // connections that use an Azure remote location fall into this ++ // category. In these cases, the default value is 30, and requesting 29 ++ // returns an error. Where both 29 and 30 are allowed, 29 is preferred, ++ // because it gives Google Cloud Support more debugging visibility. ++ SubnetLength int64 `json:"subnetLength,omitempty"` ++ + // Type: The type of interconnect attachment this is, which can take one + // of the following values: - DEDICATED: an attachment to a Dedicated + // Interconnect. 
- PARTNER: an attachment to a Partner Interconnect, +@@ -23302,6 +23690,87 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InterconnectAttachmentConfigurationConstraints struct { ++ // BgpMd5: [Output Only] Whether the attachment's BGP session ++ // requires/allows/disallows BGP MD5 authentication. This can take one ++ // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. ++ // For example, a Cross-Cloud Interconnect connection to a remote cloud ++ // provider that requires BGP MD5 authentication has the ++ // interconnectRemoteLocation ++ // attachment_configuration_constraints.bgp_md5 field set to ++ // MD5_REQUIRED, and that property is propagated to the attachment. ++ // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 ++ // is requested. ++ // ++ // Possible values: ++ // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported ++ // and can optionally be configured. ++ // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be ++ // configured. ++ // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must ++ // not be configured ++ BgpMd5 string `json:"bgpMd5,omitempty"` ++ ++ // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote ++ // location is known to support. Formatted as an array of inclusive ++ // ranges {min: min-value, max: max-value}. For example, [{min: 123, ++ // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or ++ // anything in the range 64512-65534. This field is only advisory. ++ // Although the API accepts other ranges, these are the ranges that we ++ // recommend. ++ BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BgpMd5") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BgpMd5") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectAttachmentList: Response to the list request, and + // contains a list of interconnect attachments. + type InterconnectAttachmentList struct { +@@ -24690,6 +25159,468 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect ++// Remote Location resource. You can use this resource to find remote ++// location details about an Interconnect attachment (VLAN). ++type InterconnectRemoteLocation struct { ++ // Address: [Output Only] The postal address of the Point of Presence, ++ // each line in the address is separated by a newline character. ++ Address string `json:"address,omitempty"` ++ ++ // AttachmentConfigurationConstraints: [Output Only] Subset of fields ++ // from InterconnectAttachment's |configurationConstraints| field that ++ // apply to all attachments for this remote location. ++ AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` ++ ++ // City: [Output Only] Metropolitan area designator that indicates which ++ // city an interconnect is located. For example: "Chicago, IL", ++ // "Amsterdam, Netherlands". ++ City string `json:"city,omitempty"` ++ ++ // Constraints: [Output Only] Constraints on the parameters for creating ++ // Cross-Cloud Interconnect and associated InterconnectAttachments. ++ Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` ++ ++ // Continent: [Output Only] Continent for this location, which can take ++ // one of the following values: - AFRICA - ASIA_PAC - EUROPE - ++ // NORTH_AMERICA - SOUTH_AMERICA ++ // ++ // Possible values: ++ // "AFRICA" ++ // "ASIA_PAC" ++ // "EUROPE" ++ // "NORTH_AMERICA" ++ // "SOUTH_AMERICA" ++ Continent string `json:"continent,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: [Output Only] An optional description of the resource. ++ Description string `json:"description,omitempty"` ++ ++ // FacilityProvider: [Output Only] The name of the provider for this ++ // facility (e.g., EQUINIX). ++ FacilityProvider string `json:"facilityProvider,omitempty"` ++ ++ // FacilityProviderFacilityId: [Output Only] A provider-assigned ++ // Identifier for this facility (e.g., Ashburn-DC1). 
++ FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#interconnectRemoteLocation for interconnect remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) ++ // constraints, which can take one of the following values: ++ // LACP_SUPPORTED, LACP_UNSUPPORTED ++ // ++ // Possible values: ++ // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled ++ // by default on the Cross-Cloud Interconnect. ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows ++ // bundleAggregationType as "static". GCP does not support LAGs without ++ // LACP, so requestedLinkCount must be 1. ++ Lacp string `json:"lacp,omitempty"` ++ ++ // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 100 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. ++ MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` ++ ++ // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 10 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. ++ MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` ++ ++ // Name: [Output Only] Name of the resource. ++ Name string `json:"name,omitempty"` ++ ++ // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this ++ // facility (corresponding with a netfac type in peeringdb). ++ PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` ++ ++ // PermittedConnections: [Output Only] Permitted connections. ++ PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` ++ ++ // RemoteService: [Output Only] Indicates the service provider present ++ // at the remote location. Example values: "Amazon Web Services", ++ // "Microsoft Azure". ++ RemoteService string `json:"remoteService,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Status: [Output Only] The status of this InterconnectRemoteLocation, ++ // which can take one of the following values: - CLOSED: The ++ // InterconnectRemoteLocation is closed and is unavailable for ++ // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The ++ // InterconnectRemoteLocation is available for provisioning new ++ // Cross-Cloud Interconnects. ++ // ++ // Possible values: ++ // "AVAILABLE" - The InterconnectRemoteLocation is available for ++ // provisioning new Cross-Cloud Interconnects. ++ // "CLOSED" - The InterconnectRemoteLocation is closed for ++ // provisioning new Cross-Cloud Interconnects. ++ Status string `json:"status,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Address") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Address") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraints struct { ++ // PortPairRemoteLocation: [Output Only] Port pair remote location ++ // constraints, which can take one of the following values: ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to ++ // individual ports, but the UI uses this field when ordering a pair of ++ // ports, to prevent users from accidentally ordering something that is ++ // incompatible with their cloud provider. Specifically, when ordering a ++ // redundant pair of Cross-Cloud Interconnect ports, and one of them ++ // uses a remote location with portPairMatchingRemoteLocation set to ++ // matching, the UI requires that both ports use the same remote ++ // location. ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider ++ // allocates ports in pairs, and the user should choose the same remote ++ // location for both ports. ++ // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision ++ // a redundant pair of Cross-Cloud Interconnects using two different ++ // remote locations in the same city. ++ PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` ++ ++ // PortPairVlan: [Output Only] Port pair VLAN constraints, which can ++ // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, ++ // PORT_PAIR_MATCHING_VLAN ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the ++ // Interconnect for this attachment is part of a pair of ports that ++ // should have matching VLAN allocations. This occurs with Cross-Cloud ++ // Interconnect to Azure remote locations. While GCP's API does not ++ // explicitly group pairs of ports, the UI uses this field to ensure ++ // matching VLAN ids when configuring a redundant VLAN pair. ++ // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means ++ // there is no constraint. ++ PortPairVlan string `json:"portPairVlan,omitempty"` ++ ++ // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum ++ // and maximum value (inclusive) for the IPv4 subnet length. For ++ // example, an interconnectRemoteLocation for Azure has {min: 30, max: ++ // 30} because Azure requires /30 subnets. This range specifies the ++ // values supported by both cloud providers. Interconnect currently ++ // supports /29 and /30 IPv4 subnet lengths. 
If a remote cloud has no ++ // constraint on IPv4 subnet length, the range would thus be {min: 29, ++ // max: 30}. ++ SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "PortPairRemoteLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PortPairRemoteLocation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationList: Response to the list request, and ++// contains a list of interconnect remote locations. ++type InterconnectRemoteLocationList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InterconnectRemoteLocation resources. ++ Items []*InterconnectRemoteLocation `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#interconnectRemoteLocationList for lists of interconnect ++ // remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. 
If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectsGetDiagnosticsResponse: Response for the + // InterconnectsGetDiagnosticsRequest. 
+ type InterconnectsGetDiagnosticsResponse struct { +@@ -25936,7 +26867,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -26618,9 +27549,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -27130,7 +28061,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -27242,10 +28173,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -27265,7 +28195,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -27516,7 +28450,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. + type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -27524,7 +28458,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. 
+ ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -29708,10 +30642,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -31398,6 +32333,33 @@ func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type NodeGroupsSimulateMaintenanceEventRequest struct { ++ // Nodes: Names of the nodes to go under maintenance simulation. ++ Nodes []string `json:"nodes,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Nodes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Nodes") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod NodeGroupsSimulateMaintenanceEventRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NodeTemplate: Represent a sole-tenant Node Template resource. You can + // use a template to define properties for nodes in a node group. For + // more information, read Creating node groups and instances. 
+@@ -36809,6 +37771,7 @@ type Quota struct { + // "COMMITTED_NVIDIA_A100_80GB_GPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" ++ // "COMMITTED_NVIDIA_L4_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" +@@ -36860,11 +37823,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -36879,6 +37846,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -36902,6 +37870,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -37141,6 +38110,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /addresses/address - ++ // projects/project/regions/region/addresses/address Note that ++ // destination project must be different from the source project. So ++ // /regions/region/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionAutoscalerList: Contains a list of autoscalers. 
+ type RegionAutoscalerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -42782,10 +43789,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -44462,6 +45468,20 @@ type SecurityPolicy struct { + // compute#securityPolicyfor security policies + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // security policy, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels. To see the latest fingerprint, make ++ // get() request to the security policy. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -44576,13 +45596,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -44905,7 +45929,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. 
+ RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to +@@ -44973,10 +45998,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -44984,7 +46010,8 @@ type SecurityPolicyRule struct { + Description string `json:"description,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -45015,7 +46042,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the +@@ -45115,7 +46143,13 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + + // VersionedExpr: Preconfigured versioned expression. If this field is +@@ -45354,6 +46388,13 @@ type SecurityPolicyRuleRateLimitOptions struct { + // "XFF_IP" + EnforceOnKey string `json:"enforceOnKey,omitempty"` + ++ // EnforceOnKeyConfigs: If specified, any combination of values of ++ // enforce_on_key_type/enforce_on_key_name is treated as the key on ++ // which ratelimit threshold/action is enforced. You can specify up to 3 ++ // enforce_on_key_configs. If enforce_on_key_configs is specified, ++ // enforce_on_key must not be specified. 
++ EnforceOnKeyConfigs []*SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig `json:"enforceOnKeyConfigs,omitempty"` ++ + // EnforceOnKeyName: Rate limit key name applicable only for the + // following key types: HTTP_HEADER -- Name of the HTTP header whose + // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP +@@ -45365,12 +46406,14 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -45400,6 +46443,71 @@ func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { ++ // EnforceOnKeyName: Rate limit key name applicable only for the ++ // following key types: HTTP_HEADER -- Name of the HTTP header whose ++ // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP ++ // cookie whose value is taken as the key value. ++ EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` ++ ++ // EnforceOnKeyType: Determines the key to enforce the ++ // rate_limit_threshold on. Possible values are: - ALL: A single rate ++ // limit threshold is applied to all the requests matching this rule. ++ // This is the default value if "enforceOnKeyConfigs" is not configured. ++ // - IP: The source IP address of the request is the key. Each IP has ++ // this limit enforced separately. - HTTP_HEADER: The value of the HTTP ++ // header whose name is configured under "enforceOnKeyName". The key ++ // value is truncated to the first 128 bytes of the header value. If no ++ // such header is present in the request, the key type defaults to ALL. ++ // - XFF_IP: The first IP address (i.e. the originating client IP ++ // address) specified in the list of IPs under X-Forwarded-For HTTP ++ // header. If no such header is present or the value is not a valid IP, ++ // the key defaults to the source IP address of the request i.e. key ++ // type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is ++ // configured under "enforceOnKeyName". The key value is truncated to ++ // the first 128 bytes of the cookie value. If no such cookie is present ++ // in the request, the key type defaults to ALL. - HTTP_PATH: The URL ++ // path of the HTTP request. The key value is truncated to the first 128 ++ // bytes. - SNI: Server name indication in the TLS session of the HTTPS ++ // request. The key value is truncated to the first 128 bytes. The key ++ // type defaults to ALL on a HTTP session. - REGION_CODE: The ++ // country/region from which the request originates. 
++ // ++ // Possible values: ++ // "ALL" ++ // "HTTP_COOKIE" ++ // "HTTP_HEADER" ++ // "HTTP_PATH" ++ // "IP" ++ // "REGION_CODE" ++ // "SNI" ++ // "XFF_IP" ++ EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "EnforceOnKeyName") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "EnforceOnKeyName") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type SecurityPolicyRuleRateLimitOptionsThreshold struct { + // Count: Number of HTTP(S) requests for calculating the threshold. + Count int64 `json:"count,omitempty"` +@@ -45473,7 +46581,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) +@@ -45488,8 +46596,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to +@@ -45638,7 +46745,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -45723,6 +46830,18 @@ type ServiceAttachment struct { + // the PSC service attachment. 
+ PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not +@@ -48933,8 +50052,8 @@ type Subnetwork struct { + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is determined by the org + // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // ExternalIpv6Prefix: The external IPv6 address range that is owned by +@@ -49027,12 +50146,20 @@ type Subnetwork struct { + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -49051,9 +50178,9 @@ type Subnetwork struct { + Region string `json:"region,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. 
An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -49528,6 +50655,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -50992,7 +52121,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -51109,7 +52240,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -51187,9 +52320,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -53175,7 +54310,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"CertificateMap") to +@@ -53273,7 +54410,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -54268,6 +55407,21 @@ type TargetVpnGateway struct { + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // TargetVpnGateway, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve a TargetVpnGateway. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -55980,6 +57134,22 @@ type UrlRewrite struct { + // characters. + PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + ++ // PathTemplateRewrite: If specified, the pattern rewrites the URL path ++ // (based on the :path header) using the HTTP template syntax. A ++ // corresponding path_template_match must be specified. Any template ++ // variables must exist in the path_template_match field. - -At least ++ // one variable must be specified in the path_template_match field - You ++ // can omit variables from the rewritten URL - The * and ** operators ++ // cannot be matched unless they have a corresponding variable name - ++ // e.g. {format=*} or {var=**}. For example, a path_template_match of ++ // /static/{format=**} could be rewritten as /static/content/{format} to ++ // prefix /content to the URL. Variables can also be re-ordered in a ++ // rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as ++ // /content/{format}/{country}/{suffix}. At least one non-empty ++ // routeRules[].matchRules[].path_template_match is required. Only one ++ // of path_prefix_rewrite or path_template_rewrite may be specified. ++ PathTemplateRewrite string `json:"pathTemplateRewrite,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "HostRewrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -56033,12 +57203,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. 
A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -56053,9 +57231,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -57284,7 +58462,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -57317,8 +58495,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -57635,6 +58813,21 @@ type VpnTunnel struct { + // VPN tunnels. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // VpnTunnel, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. 
++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve a VpnTunnel. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with the peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges must be +@@ -60906,6 +62099,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro + } + } + ++// method id "compute.addresses.move": ++ ++type AddressesMoveCall struct { ++ s *Service ++ project string ++ region string ++ address string ++ regionaddressesmoverequest *RegionAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++// - region: Name of the region for this request. ++func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { ++ c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.address = address ++ c.regionaddressesmoverequest = regionaddressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *AddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.addresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource.", ++ // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.addresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": 
"string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "request": { ++ // "$ref": "RegionAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.addresses.setLabels": + + type AddressesSetLabelsCall struct { +@@ -78150,6 +79531,183 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList + } + } + ++// method id "compute.globalAddresses.move": ++ ++type GlobalAddressesMoveCall struct { ++ s *Service ++ project string ++ address string ++ globaladdressesmoverequest *GlobalAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource from one project to ++// another project. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { ++ c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.address = address ++ c.globaladdressesmoverequest = globaladdressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. 
Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *GlobalAddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.globalAddresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource from one project to another project.", ++ // "flatPath": "projects/{project}/global/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.globalAddresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/addresses/{address}/move", ++ // "request": { ++ // "$ref": "GlobalAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.globalAddresses.setLabels": + + type GlobalAddressesSetLabelsCall struct { +@@ -104089,164 +105647,6 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + return c + } + +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. 
+-func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "instance": c.instance, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.instances.simulateMaintenanceEvent" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Operation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", +- // "httpMethod": "POST", +- // "id": "compute.instances.simulateMaintenanceEvent", +- // "parameterOrder": [ +- // "project", +- // "zone", +- // "instance" +- // ], +- // "parameters": { +- // "instance": { +- // "description": "Name of the instance scoping this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "zone": { +- // "description": "The name of the zone for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", +- // "response": { +- // "$ref": "Operation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.instances.start": +- +-type InstancesStartCall struct { +- s *Service +- project string +- zone string +- instance string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// Start: Starts an instance that was stopped using the instances().stop +-// method. For more information, see Restart an instance. +-// +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { +- c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.zone = zone +- c.instance = instance +- return c +-} +- + // RequestId sets the optional parameter "requestId": An optional + // request ID to identify requests. Specify a unique request ID so that + // if you must retry your request, the server will know to ignore the +@@ -104258,7 +105658,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -104266,7 +105666,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -104274,21 +105674,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesStartCall) Header() http.Header { ++func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -104298,7 +105698,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -104313,14 +105713,193 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.start" call. ++// Do executes the "compute.instances.simulateMaintenanceEvent" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.simulateMaintenanceEvent", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "instance": { ++ // "description": "Name of the instance scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.start": ++ ++type InstancesStartCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Start: Starts an instance that was stopped using the instances().stop ++// method. For more information, see Restart an instance. ++// ++// - instance: Name of the instance resource to start. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { ++ c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesStartCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.start" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -108151,6 +109730,449 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter + } + } + ++// method id "compute.interconnectRemoteLocations.get": ++ ++type InterconnectRemoteLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectRemoteLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the details for the specified interconnect remote ++// location. Gets a list of available interconnect remote locations by ++// making a list() request. ++// ++// - interconnectRemoteLocation: Name of the interconnect remote ++// location to return. ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { ++ c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnectRemoteLocation = interconnectRemoteLocation ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "interconnectRemoteLocation": c.interconnectRemoteLocation, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.get" call. ++// Exactly one of *InterconnectRemoteLocation or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectRemoteLocation.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.get", ++ // "parameterOrder": [ ++ // "project", ++ // "interconnectRemoteLocation" ++ // ], ++ // "parameters": { ++ // "interconnectRemoteLocation": { ++ // "description": "Name of the interconnect remote location to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnectRemoteLocations.list": ++ ++type InterconnectRemoteLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of interconnect remote locations available ++// to the specified project. ++// ++// - project: Project ID for this request. 
++func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { ++ c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. 
By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *InterconnectRemoteLocationsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.list" call. ++// Exactly one of *InterconnectRemoteLocationList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocationList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ + // method id "compute.interconnects.delete": + + type InterconnectsDeleteCall struct { +@@ -125309,6 +127331,196 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera + + } + ++// method id "compute.nodeGroups.simulateMaintenanceEvent": ++ ++type NodeGroupsSimulateMaintenanceEventCall struct { ++ s *Service ++ project string ++ zone string ++ nodeGroup string ++ nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SimulateMaintenanceEvent: Simulates maintenance event on specified ++// nodes from the node group. ++// ++// - nodeGroup: Name of the NodeGroup resource whose nodes will go under ++// maintenance simulation. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *NodeGroupsService) SimulateMaintenanceEvent(project string, zone string, nodeGroup string, nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest) *NodeGroupsSimulateMaintenanceEventCall { ++ c := &NodeGroupsSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.nodeGroup = nodeGroup ++ c.nodegroupssimulatemaintenanceeventrequest = nodegroupssimulatemaintenanceeventrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NodeGroupsSimulateMaintenanceEventCall) RequestId(requestId string) *NodeGroupsSimulateMaintenanceEventCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *NodeGroupsSimulateMaintenanceEventCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Context(ctx context.Context) *NodeGroupsSimulateMaintenanceEventCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NodeGroupsSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssimulatemaintenanceeventrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "nodeGroup": c.nodeGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.nodeGroups.simulateMaintenanceEvent" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Simulates maintenance event on specified nodes from the node group.", ++ // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ // "httpMethod": "POST", ++ // "id": "compute.nodeGroups.simulateMaintenanceEvent", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "nodeGroup" ++ // ], ++ // "parameters": { ++ // "nodeGroup": { ++ // "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ // "request": { ++ // "$ref": "NodeGroupsSimulateMaintenanceEventRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.nodeGroups.testIamPermissions": + + type NodeGroupsTestIamPermissionsCall struct { +@@ -167365,6 +169577,15 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN + return c + } + ++// NatName sets the optional parameter "natName": Name of the nat ++// service to filter the Nat Mapping information. If it is omitted, all ++// nats for this router will be returned. Name should conform to ++// RFC1035. 
++func (c *RoutersGetNatMappingInfoCall) NatName(natName string) *RoutersGetNatMappingInfoCall { ++ c.urlParams_.Set("natName", natName) ++ return c ++} ++ + // OrderBy sets the optional parameter "orderBy": Sorts list results by + // a certain order. By default, results are returned in alphanumerical + // order based on the resource name. You can also sort results in +@@ -167520,6 +169741,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp + // "minimum": "0", + // "type": "integer" + // }, ++ // "natName": { ++ // "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", +diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json +index 2569981131b..0f33192ba35 100644 +--- a/vendor/google.golang.org/api/container/v1/container-api.json ++++ b/vendor/google.golang.org/api/container/v1/container-api.json +@@ -197,6 +197,31 @@ + "resources": { + "clusters": { + "methods": { ++ "checkAutopilotCompatibility": { ++ "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", ++ "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", ++ "httpMethod": "GET", ++ "id": "container.projects.locations.clusters.checkAutopilotCompatibility", ++ "parameterOrder": [ ++ "name" ++ ], ++ "parameters": { ++ "name": { ++ "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", ++ "location": "path", ++ "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "v1/{+name}:checkAutopilotCompatibility", ++ "response": { ++ "$ref": "CheckAutopilotCompatibilityResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform" ++ ] ++ }, + "completeIpRotation": { + "description": "Completes master IP rotation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation", +@@ -447,6 +472,7 @@ + ] + }, + "setLocations": { ++ "deprecated": true, + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + "httpMethod": "POST", +@@ -1517,6 +1543,7 @@ + ] + }, + "locations": { ++ "deprecated": true, + "description": "Sets the locations for a specific cluster. Deprecated. 
Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + "httpMethod": "POST", +@@ -2487,7 +2514,7 @@ + } + } + }, +- "revision": "20230222", ++ "revision": "20230519", + "rootUrl": "https://container.googleapis.com/", + "schemas": { + "AcceleratorConfig": { +@@ -2514,6 +2541,20 @@ + }, + "type": "object" + }, ++ "AdditionalPodRangesConfig": { ++ "description": "AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message.", ++ "id": "AdditionalPodRangesConfig", ++ "properties": { ++ "podRangeNames": { ++ "description": "Name for pod secondary ipv4 range which has the actual range defined ahead.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "AddonsConfig": { + "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", + "id": "AddonsConfig", +@@ -2614,6 +2655,53 @@ + }, + "type": "object" + }, ++ "AutopilotCompatibilityIssue": { ++ "description": "AutopilotCompatibilityIssue contains information about a specific compatibility issue with Autopilot mode.", ++ "id": "AutopilotCompatibilityIssue", ++ "properties": { ++ "constraintType": { ++ "description": "The constraint type of the issue.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "The description of the issue.", ++ "type": "string" ++ }, ++ "documentationUrl": { ++ "description": "A URL to a public documnetation, which addresses resolving this issue.", ++ "type": "string" ++ }, ++ "incompatibilityType": { ++ "description": "The incompatibility type of this issue.", ++ "enum": [ ++ "UNSPECIFIED", ++ "INCOMPATIBILITY", ++ "ADDITIONAL_CONFIG_REQUIRED", ++ "PASSED_WITH_OPTIONAL_CONFIG" ++ ], ++ "enumDescriptions": [ ++ "Default value, should not be used.", ++ "Indicates that the issue is a known incompatibility between the cluster and Autopilot mode.", ++ "Indicates the issue is an incompatibility if customers take no further action to resolve.", ++ "Indicates the issue is not an incompatibility, but depending on the workloads business logic, there is a potential that they won't work on Autopilot." 
++ ], ++ "type": "string" ++ }, ++ "lastObservation": { ++ "description": "The last time when this issue was observed.", ++ "format": "google-datetime", ++ "type": "string" ++ }, ++ "subjects": { ++ "description": "The name of the resources which are subject to this issue.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "AutoprovisioningNodePoolDefaults": { + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP.", + "id": "AutoprovisioningNodePoolDefaults", +@@ -2665,6 +2753,22 @@ + }, + "type": "object" + }, ++ "BestEffortProvisioning": { ++ "description": "Best effort provisioning.", ++ "id": "BestEffortProvisioning", ++ "properties": { ++ "enabled": { ++ "description": "When this is enabled, cluster/node pool creations will ignore non-fatal errors like stockout to best provision as many nodes as possible right now and eventually bring up all target number of nodes", ++ "type": "boolean" ++ }, ++ "minProvisionNodes": { ++ "description": "Minimum number of nodes to be provisioned to be considered as succeeded, and the rest of nodes will be provisioned gradually and eventually when stockout issue has been resolved.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "BigQueryDestination": { + "description": "Parameters for using BigQuery as the destination of resource usage export.", + "id": "BigQueryDestination", +@@ -2793,6 +2897,24 @@ + }, + "type": "object" + }, ++ "CheckAutopilotCompatibilityResponse": { ++ "description": "CheckAutopilotCompatibilityResponse has a list of compatibility issues.", ++ "id": "CheckAutopilotCompatibilityResponse", ++ "properties": { ++ "issues": { ++ "description": "The list of issues for the given operation.", ++ "items": { ++ "$ref": "AutopilotCompatibilityIssue" ++ }, ++ "type": "array" ++ }, ++ "summary": { ++ "description": "The summary of the autopilot compatibility response.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "CidrBlock": { + "description": "CidrBlock contains an optional name and one CIDR block.", + "id": "CidrBlock", +@@ -2916,6 +3038,10 @@ + "description": "An optional description of this cluster.", + "type": "string" + }, ++ "enableK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Beta APIs Config" ++ }, + "enableKubernetesAlpha": { + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.", + "type": "boolean" +@@ -2936,6 +3062,10 @@ + "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, ++ "fleet": { ++ "$ref": "Fleet", ++ "description": "Fleet information for the cluster." ++ }, + "id": { + "description": "Output only. Unique id for the cluster.", + "readOnly": true, +@@ -3066,7 +3196,7 @@ + }, + "releaseChannel": { + "$ref": "ReleaseChannel", +- "description": "Release channel configuration." ++ "description": "Release channel configuration. If left unspecified on cluster creation and a version is specified, the cluster is enrolled in the most mature release channel where the version is available (first checking STABLE, then REGULAR, and finally RAPID). 
Otherwise, if no release channel configuration and no version is specified, the cluster is enrolled in the REGULAR channel with its default version." + }, + "resourceLabels": { + "additionalProperties": { +@@ -3187,6 +3317,10 @@ + "description": "ClusterUpdate describes an update to the cluster. Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", + "id": "ClusterUpdate", + "properties": { ++ "additionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "The additional pod ranges to be added to the cluster. These pod ranges can be used by node pools to allocate pod IPs." ++ }, + "desiredAddonsConfig": { + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." +@@ -3233,10 +3367,18 @@ + "$ref": "DNSConfig", + "description": "DNSConfig contains clusterDNS config for this cluster." + }, ++ "desiredEnableFqdnNetworkPolicy": { ++ "description": "Enable/Disable FQDN Network Policy for the cluster.", ++ "type": "boolean" ++ }, + "desiredEnablePrivateEndpoint": { + "description": "Enable/Disable private endpoint for the cluster's master.", + "type": "boolean" + }, ++ "desiredFleet": { ++ "$ref": "Fleet", ++ "description": "The desired fleet configuration for the cluster." ++ }, + "desiredGatewayApiConfig": { + "$ref": "GatewayAPIConfig", + "description": "The desired config of Gateway API on this cluster." +@@ -3257,6 +3399,10 @@ + "$ref": "IntraNodeVisibilityConfig", + "description": "The desired config of Intra-node visibility." + }, ++ "desiredK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Desired Beta APIs to be enabled for cluster." ++ }, + "desiredL4ilbSubsettingConfig": { + "$ref": "ILBSubsettingConfig", + "description": "The desired L4 Internal Load Balancer Subsetting configuration." +@@ -3378,9 +3524,17 @@ + "$ref": "WorkloadIdentityConfig", + "description": "Configuration for Workload Identity." + }, ++ "enableK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Kubernetes open source beta apis enabled on the cluster. Only beta apis" ++ }, + "etag": { + "description": "The current etag of the cluster. If an etag is provided and does not match the current etag of the cluster, update will be blocked and an ABORTED error will be returned.", + "type": "string" ++ }, ++ "removedAdditionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." 
+ } + }, + "type": "object" +@@ -3571,7 +3725,7 @@ + "type": "string" + }, + "state": { +- "description": "Denotes the state of etcd encryption.", ++ "description": "The desired state of etcd encryption.", + "enum": [ + "UNKNOWN", + "ENCRYPTED", +@@ -3664,6 +3818,25 @@ + }, + "type": "object" + }, ++ "Fleet": { ++ "description": "Fleet is the fleet configuration for the cluster.", ++ "id": "Fleet", ++ "properties": { ++ "membership": { ++ "description": "[Output only] The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`.", ++ "type": "string" ++ }, ++ "preRegistered": { ++ "description": "[Output only] Whether the cluster has been registered through the fleet API.", ++ "type": "boolean" ++ }, ++ "project": { ++ "description": "The Fleet host project(project ID or project number) where this cluster will be registered to. This field cannot be changed after the cluster has been registered.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GPUSharingConfig": { + "description": "GPUSharingConfig represents the GPU sharing configuration for Hardware Accelerators.", + "id": "GPUSharingConfig", +@@ -3884,6 +4057,11 @@ + "description": "Configuration for controlling how IPs are allocated in the cluster.", + "id": "IPAllocationPolicy", + "properties": { ++ "additionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "Output only. [Output only] The additional pod ranges that are added to the cluster. These pod ranges can be used by new node pools to allocate pod IPs automatically. Once the range is removed it will not show up in IPAllocationPolicy.", ++ "readOnly": true ++ }, + "clusterIpv4Cidr": { + "description": "This field is deprecated, use cluster_ipv4_cidr_block.", + "type": "string" +@@ -3922,6 +4100,10 @@ + "description": "The IP address range of the instance IPs in this cluster. This is applicable only if `create_subnetwork` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", + "type": "string" + }, ++ "podCidrOverprovisionConfig": { ++ "$ref": "PodCIDROverprovisionConfig", ++ "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the cluster. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is doubled and then rounded off to next power of 2 to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." 
++ }, + "servicesIpv4Cidr": { + "description": "This field is deprecated, use services_ipv4_cidr_block.", + "type": "string" +@@ -4042,6 +4224,20 @@ + }, + "type": "object" + }, ++ "K8sBetaAPIConfig": { ++ "description": "K8sBetaAPIConfig , configuration for beta APIs", ++ "id": "K8sBetaAPIConfig", ++ "properties": { ++ "enabledApis": { ++ "description": "Enabled k8s beta APIs.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "KubernetesDashboard": { + "description": "Configuration for the Kubernetes Dashboard.", + "id": "KubernetesDashboard", +@@ -4479,6 +4675,10 @@ + "$ref": "DNSConfig", + "description": "DNSConfig contains clusterDNS config for this cluster." + }, ++ "enableFqdnNetworkPolicy": { ++ "description": "Whether FQDN Network Policy is enabled on this cluster.", ++ "type": "boolean" ++ }, + "enableIntraNodeVisibility": { + "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", + "type": "boolean" +@@ -4589,6 +4789,38 @@ + }, + "type": "object" + }, ++ "NodeAffinity": { ++ "description": "Specifies the NodeAffinity key, values, and affinity operator according to [shared sole tenant node group affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity).", ++ "id": "NodeAffinity", ++ "properties": { ++ "key": { ++ "description": "Key for NodeAffinity.", ++ "type": "string" ++ }, ++ "operator": { ++ "description": "Operator for NodeAffinity.", ++ "enum": [ ++ "OPERATOR_UNSPECIFIED", ++ "IN", ++ "NOT_IN" ++ ], ++ "enumDescriptions": [ ++ "Invalid or unspecified affinity operator.", ++ "Affinity operator.", ++ "Anti-affinity operator." ++ ], ++ "type": "string" ++ }, ++ "values": { ++ "description": "Values for NodeAffinity.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "NodeConfig": { + "description": "Parameters that describe the nodes in a cluster. GKE Autopilot clusters do not recognize parameters in `NodeConfig`. Use AutoprovisioningNodePoolDefaults instead.", + "id": "NodeConfig", +@@ -4722,6 +4954,10 @@ + "$ref": "ShieldedInstanceConfig", + "description": "Shielded Instance options." + }, ++ "soleTenantConfig": { ++ "$ref": "SoleTenantConfig", ++ "description": "Parameters for node pools to be backed by shared sole tenant node groups." ++ }, + "spot": { + "description": "Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.", + "type": "boolean" +@@ -4839,6 +5075,10 @@ + "$ref": "NetworkPerformanceConfig", + "description": "Network bandwidth tier configuration." + }, ++ "podCidrOverprovisionConfig": { ++ "$ref": "PodCIDROverprovisionConfig", ++ "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the nodepool. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is rounded off to next power of 2 and we then double that to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." ++ }, + "podIpv4CidrBlock": { + "description": "The IP address range for pod IPs in this node pool. Only applicable if `create_pod_range` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. 
`/14`) to have a range chosen with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) to pick a specific range to use. Only applicable if `ip_allocation_policy.use_ip_aliases` is true. This field cannot be changed after the node pool has been created.", + "type": "string" +@@ -4858,6 +5098,10 @@ + "$ref": "NodePoolAutoscaling", + "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present." + }, ++ "bestEffortProvisioning": { ++ "$ref": "BestEffortProvisioning", ++ "description": "Enable best effort provisioning for nodes" ++ }, + "conditions": { + "description": "Which conditions caused the current node pool state.", + "items": { +@@ -5158,26 +5402,28 @@ + "SET_MASTER_AUTH", + "SET_NODE_POOL_SIZE", + "SET_NETWORK_POLICY", +- "SET_MAINTENANCE_POLICY" ++ "SET_MAINTENANCE_POLICY", ++ "RESIZE_CLUSTER" + ], + "enumDescriptions": [ + "Not set.", +- "Cluster create.", +- "Cluster delete.", +- "A master upgrade.", +- "A node upgrade.", +- "Cluster repair.", +- "Cluster update.", +- "Node pool create.", +- "Node pool delete.", +- "Set node pool management.", +- "Automatic node pool repair.", +- "Automatic node upgrade.", +- "Set labels.", +- "Set/generate master auth materials", +- "Set node pool size.", +- "Updates network policy for a cluster.", +- "Set the maintenance policy." ++ "The cluster is being created. The cluster should be assumed to be unusable until the operation finishes. In the event of the operation failing, the cluster will enter the ERROR state and eventually be deleted.", ++ "The cluster is being deleted. The cluster should be assumed to be unusable as soon as this operation starts. In the event of the operation failing, the cluster will enter the ERROR state and the deletion will be automatically retried until completed.", ++ "The cluster version is being updated. Note that this includes \"upgrades\" to the same version, which are simply a recreation. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#upgrading_automatically). For more details, see [documentation on cluster upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#cluster_upgrades).", ++ "A node pool is being updated. Despite calling this an \"upgrade\", this includes most forms of updates to node pools. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades). This operation sets the progress field and may be canceled. The upgrade strategy depends on [node pool configuration](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pool-upgrade-strategies). The nodes are generally still usable during this operation.", ++ "A problem has been detected with the control plane and is being repaired. This operation type is initiated by GKE. For more details, see [documentation on repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs).", ++ "The cluster is being updated. This is a broad category of operations and includes operations that only change metadata as well as those that must recreate the entire cluster. If the control plane must be recreated, this will cause temporary downtime for zonal clusters. Some features require recreating the nodes as well. 
Those will be recreated as separate operations and the update may not be completely functional until the node pools recreations finish. Node recreations will generally follow [maintenance policies](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions). Some GKE-initiated operations use this type. This includes certain types of auto-upgrades and incident mitigations.", ++ "A node pool is being created. The node pool should be assumed to be unusable until this operation finishes. In the event of an error, the node pool may be partially created. If enabled, [node autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning) may have automatically initiated such operations.", ++ "The node pool is being deleted. The node pool should be assumed to be unusable as soon as this operation starts.", ++ "The node pool's manamagent field is being updated. These operations only update metadata and may be concurrent with most other operations.", ++ "A problem has been detected with nodes and [they are being repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair). This operation type is initiated by GKE, typically automatically. This operation may be concurrent with other operations and there may be multiple repairs occurring on the same node pool.", ++ "Unused. Automatic node upgrade uses UPGRADE_NODES.", ++ "Unused. Updating labels uses UPDATE_CLUSTER.", ++ "Unused. Updating master auth uses UPDATE_CLUSTER.", ++ "The node pool is being resized. With the exception of resizing to or from size zero, the node pool is generally usable during this operation.", ++ "Unused. Updating network policy uses UPDATE_CLUSTER.", ++ "Unused. Updating maintenance policy uses UPDATE_CLUSTER.", ++ "The control plane is being resized. This operation type is initiated by GKE. These operations are often performed preemptively to ensure that the control plane has sufficient resources and is not typically an indication of issues. For more details, see [documentation on resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs)." + ], + "type": "string" + }, +@@ -5187,7 +5433,7 @@ + "readOnly": true + }, + "selfLink": { +- "description": "Server-defined URL for the resource.", ++ "description": "Server-defined URI for the operation. Example: `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1/operations/operation-123`.", + "type": "string" + }, + "startTime": { +@@ -5218,7 +5464,7 @@ + "type": "string" + }, + "targetLink": { +- "description": "Server-defined URL for the target of the operation.", ++ "description": "Server-defined URI for the target of the operation. The format of this is a URI to the resource being modified (such as a cluster, node pool, or node). For node pool repairs, there may be multiple nodes being repaired, but only one will be the target. 
Examples: - ## `https://container.googleapis.com/v1/projects/123/locations/us-central1/clusters/my-cluster` ## `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np` `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np/node/my-node`", + "type": "string" + }, + "zone": { +@@ -5290,6 +5536,17 @@ + }, + "type": "object" + }, ++ "PodCIDROverprovisionConfig": { ++ "description": "[PRIVATE FIELD] Config for pod CIDR size overprovisioning.", ++ "id": "PodCIDROverprovisionConfig", ++ "properties": { ++ "disable": { ++ "description": "Whether Pod CIDR overprovisioning is disabled. Note: Pod CIDR overprovisioning is enabled by default.", ++ "type": "boolean" ++ } ++ }, ++ "type": "object" ++ }, + "PrivateClusterConfig": { + "description": "Configuration options for private clusters.", + "id": "PrivateClusterConfig", +@@ -6068,6 +6325,20 @@ + }, + "type": "object" + }, ++ "SoleTenantConfig": { ++ "description": "SoleTenantConfig contains the NodeAffinities to specify what shared sole tenant node groups should back the node pool.", ++ "id": "SoleTenantConfig", ++ "properties": { ++ "nodeAffinities": { ++ "description": "NodeAffinities used to match to a shared sole tenant node group.", ++ "items": { ++ "$ref": "NodeAffinity" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "StandardRolloutPolicy": { + "description": "Standard rollout policy is the default policy for blue-green.", + "id": "StandardRolloutPolicy", +@@ -6575,7 +6846,7 @@ + "enumDescriptions": [ + "UNKNOWN is the zero value of the Status enum. It's not a valid status.", + "UNUSED denotes that this range is unclaimed by any cluster.", +- "IN_USE_SERVICE denotes that this range is claimed by a cluster for services. It cannot be used for other clusters.", ++ "IN_USE_SERVICE denotes that this range is claimed by cluster(s) for services. User-managed services range can be shared between clusters within the same subnetwork.", + "IN_USE_SHAREABLE_POD denotes this range was created by the network admin and is currently claimed by a cluster for pods. It can only be used by other clusters as a pod range.", + "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed for pods. It cannot be used for other clusters." + ], +diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go +index 675d4a13ba4..0982b905144 100644 +--- a/vendor/google.golang.org/api/container/v1/container-gen.go ++++ b/vendor/google.golang.org/api/container/v1/container-gen.go +@@ -71,6 +71,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "container:v1" + const apiName = "container" +@@ -322,6 +323,37 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// AdditionalPodRangesConfig: AdditionalPodRangesConfig is the ++// configuration for additional pod secondary ranges supporting the ++// ClusterUpdate message. ++type AdditionalPodRangesConfig struct { ++ // PodRangeNames: Name for pod secondary ipv4 range which has the actual ++ // range defined ahead. ++ PodRangeNames []string `json:"podRangeNames,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "PodRangeNames") to ++ // unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PodRangeNames") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *AdditionalPodRangesConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod AdditionalPodRangesConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // AddonsConfig: Configuration for the addons that can be automatically + // spun up in the cluster, enabling additional functionality. + type AddonsConfig struct { +@@ -531,6 +563,62 @@ func (s *Autopilot) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// AutopilotCompatibilityIssue: AutopilotCompatibilityIssue contains ++// information about a specific compatibility issue with Autopilot mode. ++type AutopilotCompatibilityIssue struct { ++ // ConstraintType: The constraint type of the issue. ++ ConstraintType string `json:"constraintType,omitempty"` ++ ++ // Description: The description of the issue. ++ Description string `json:"description,omitempty"` ++ ++ // DocumentationUrl: A URL to a public documnetation, which addresses ++ // resolving this issue. ++ DocumentationUrl string `json:"documentationUrl,omitempty"` ++ ++ // IncompatibilityType: The incompatibility type of this issue. ++ // ++ // Possible values: ++ // "UNSPECIFIED" - Default value, should not be used. ++ // "INCOMPATIBILITY" - Indicates that the issue is a known ++ // incompatibility between the cluster and Autopilot mode. ++ // "ADDITIONAL_CONFIG_REQUIRED" - Indicates the issue is an ++ // incompatibility if customers take no further action to resolve. ++ // "PASSED_WITH_OPTIONAL_CONFIG" - Indicates the issue is not an ++ // incompatibility, but depending on the workloads business logic, there ++ // is a potential that they won't work on Autopilot. ++ IncompatibilityType string `json:"incompatibilityType,omitempty"` ++ ++ // LastObservation: The last time when this issue was observed. ++ LastObservation string `json:"lastObservation,omitempty"` ++ ++ // Subjects: The name of the resources which are subject to this issue. ++ Subjects []string `json:"subjects,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ConstraintType") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConstraintType") to ++ // include in API requests with the JSON null value. 
By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *AutopilotCompatibilityIssue) MarshalJSON() ([]byte, error) { ++ type NoMethod AutopilotCompatibilityIssue ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults + // contains defaults for a node pool created by NAP. + type AutoprovisioningNodePoolDefaults struct { +@@ -613,6 +701,42 @@ func (s *AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// BestEffortProvisioning: Best effort provisioning. ++type BestEffortProvisioning struct { ++ // Enabled: When this is enabled, cluster/node pool creations will ++ // ignore non-fatal errors like stockout to best provision as many nodes ++ // as possible right now and eventually bring up all target number of ++ // nodes ++ Enabled bool `json:"enabled,omitempty"` ++ ++ // MinProvisionNodes: Minimum number of nodes to be provisioned to be ++ // considered as succeeded, and the rest of nodes will be provisioned ++ // gradually and eventually when stockout issue has been resolved. ++ MinProvisionNodes int64 `json:"minProvisionNodes,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Enabled") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Enabled") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BestEffortProvisioning) MarshalJSON() ([]byte, error) { ++ type NoMethod BestEffortProvisioning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // BigQueryDestination: Parameters for using BigQuery as the destination + // of resource usage export. + type BigQueryDestination struct { +@@ -826,6 +950,43 @@ func (s *CancelOperationRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// CheckAutopilotCompatibilityResponse: ++// CheckAutopilotCompatibilityResponse has a list of compatibility ++// issues. ++type CheckAutopilotCompatibilityResponse struct { ++ // Issues: The list of issues for the given operation. ++ Issues []*AutopilotCompatibilityIssue `json:"issues,omitempty"` ++ ++ // Summary: The summary of the autopilot compatibility response. ++ Summary string `json:"summary,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. 
++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Issues") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Issues") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CheckAutopilotCompatibilityResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod CheckAutopilotCompatibilityResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // CidrBlock: CidrBlock contains an optional name and one CIDR block. + type CidrBlock struct { + // CidrBlock: cidr_block must be specified in CIDR notation. +@@ -998,6 +1159,9 @@ type Cluster struct { + // Description: An optional description of this cluster. + Description string `json:"description,omitempty"` + ++ // EnableK8sBetaApis: Beta APIs Config ++ EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` ++ + // EnableKubernetesAlpha: Kubernetes alpha features are enabled on this + // cluster. This includes alpha API groups (e.g. v1alpha1) and features + // that may not be production ready in the kubernetes version of the +@@ -1025,6 +1189,9 @@ type Cluster struct { + // format. + ExpireTime string `json:"expireTime,omitempty"` + ++ // Fleet: Fleet information for the cluster. ++ Fleet *Fleet `json:"fleet,omitempty"` ++ + // Id: Output only. Unique id for the cluster. + Id string `json:"id,omitempty"` + +@@ -1190,7 +1357,12 @@ type Cluster struct { + // PrivateClusterConfig: Configuration for private cluster. + PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` + +- // ReleaseChannel: Release channel configuration. ++ // ReleaseChannel: Release channel configuration. If left unspecified on ++ // cluster creation and a version is specified, the cluster is enrolled ++ // in the most mature release channel where the version is available ++ // (first checking STABLE, then REGULAR, and finally RAPID). Otherwise, ++ // if no release channel configuration and no version is specified, the ++ // cluster is enrolled in the REGULAR channel with its default version. + ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` + + // ResourceLabels: The resource labels for the cluster to use to +@@ -1352,6 +1524,11 @@ func (s *ClusterAutoscaling) MarshalJSON() ([]byte, error) { + // Exactly one update can be applied to a cluster with each request, so + // at most one field can be provided. + type ClusterUpdate struct { ++ // AdditionalPodRangesConfig: The additional pod ranges to be added to ++ // the cluster. These pod ranges can be used by node pools to allocate ++ // pod IPs. 
++ AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` ++ + // DesiredAddonsConfig: Configurations for the various addons available + // to run in the cluster. + DesiredAddonsConfig *AddonsConfig `json:"desiredAddonsConfig,omitempty"` +@@ -1395,10 +1572,17 @@ type ClusterUpdate struct { + // cluster. + DesiredDnsConfig *DNSConfig `json:"desiredDnsConfig,omitempty"` + ++ // DesiredEnableFqdnNetworkPolicy: Enable/Disable FQDN Network Policy ++ // for the cluster. ++ DesiredEnableFqdnNetworkPolicy bool `json:"desiredEnableFqdnNetworkPolicy,omitempty"` ++ + // DesiredEnablePrivateEndpoint: Enable/Disable private endpoint for the + // cluster's master. + DesiredEnablePrivateEndpoint bool `json:"desiredEnablePrivateEndpoint,omitempty"` + ++ // DesiredFleet: The desired fleet configuration for the cluster. ++ DesiredFleet *Fleet `json:"desiredFleet,omitempty"` ++ + // DesiredGatewayApiConfig: The desired config of Gateway API on this + // cluster. + DesiredGatewayApiConfig *GatewayAPIConfig `json:"desiredGatewayApiConfig,omitempty"` +@@ -1418,6 +1602,9 @@ type ClusterUpdate struct { + // visibility. + DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `json:"desiredIntraNodeVisibilityConfig,omitempty"` + ++ // DesiredK8sBetaApis: Desired Beta APIs to be enabled for cluster. ++ DesiredK8sBetaApis *K8sBetaAPIConfig `json:"desiredK8sBetaApis,omitempty"` ++ + // DesiredL4ilbSubsettingConfig: The desired L4 Internal Load Balancer + // Subsetting configuration. + DesiredL4ilbSubsettingConfig *ILBSubsettingConfig `json:"desiredL4ilbSubsettingConfig,omitempty"` +@@ -1560,26 +1747,37 @@ type ClusterUpdate struct { + // DesiredWorkloadIdentityConfig: Configuration for Workload Identity. + DesiredWorkloadIdentityConfig *WorkloadIdentityConfig `json:"desiredWorkloadIdentityConfig,omitempty"` + ++ // EnableK8sBetaApis: Kubernetes open source beta apis enabled on the ++ // cluster. Only beta apis ++ EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` ++ + // Etag: The current etag of the cluster. If an etag is provided and + // does not match the current etag of the cluster, update will be + // blocked and an ABORTED error will be returned. + Etag string `json:"etag,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DesiredAddonsConfig") +- // to unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // RemovedAdditionalPodRangesConfig: The additional pod ranges that are ++ // to be removed from the cluster. The pod ranges specified here must ++ // have been specified earlier in the 'additional_pod_ranges_config' ++ // argument. ++ RemovedAdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"removedAdditionalPodRangesConfig,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DesiredAddonsConfig") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. ++ // NullFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to include in API requests with the JSON ++ // null value. By default, fields with empty values are omitted from API ++ // requests. However, any field with an empty value appearing in ++ // NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. + NullFields []string `json:"-"` + } + +@@ -1952,7 +2150,7 @@ type DatabaseEncryption struct { + // y + KeyName string `json:"keyName,omitempty"` + +- // State: Denotes the state of etcd encryption. ++ // State: The desired state of etcd encryption. + // + // Possible values: + // "UNKNOWN" - Should never be set +@@ -2155,6 +2353,45 @@ func (s *Filter) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// Fleet: Fleet is the fleet configuration for the cluster. ++type Fleet struct { ++ // Membership: [Output only] The full resource name of the registered ++ // fleet membership of the cluster, in the format ++ // `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. ++ Membership string `json:"membership,omitempty"` ++ ++ // PreRegistered: [Output only] Whether the cluster has been registered ++ // through the fleet API. ++ PreRegistered bool `json:"preRegistered,omitempty"` ++ ++ // Project: The Fleet host project(project ID or project number) where ++ // this cluster will be registered to. This field cannot be changed ++ // after the cluster has been registered. ++ Project string `json:"project,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Membership") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Membership") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *Fleet) MarshalJSON() ([]byte, error) { ++ type NoMethod Fleet ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // GPUSharingConfig: GPUSharingConfig represents the GPU sharing + // configuration for Hardware Accelerators. 
+ type GPUSharingConfig struct { +@@ -2576,6 +2813,12 @@ func (s *ILBSubsettingConfig) MarshalJSON() ([]byte, error) { + // IPAllocationPolicy: Configuration for controlling how IPs are + // allocated in the cluster. + type IPAllocationPolicy struct { ++ // AdditionalPodRangesConfig: Output only. [Output only] The additional ++ // pod ranges that are added to the cluster. These pod ranges can be ++ // used by new node pools to allocate pod IPs automatically. Once the ++ // range is removed it will not show up in IPAllocationPolicy. ++ AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` ++ + // ClusterIpv4Cidr: This field is deprecated, use + // cluster_ipv4_cidr_block. + ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` +@@ -2630,6 +2873,17 @@ type IPAllocationPolicy struct { + // specific range to use. + NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` + ++ // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size ++ // overprovisioning config for the cluster. Pod CIDR size per node ++ // depends on max_pods_per_node. By default, the value of ++ // max_pods_per_node is doubled and then rounded off to next power of 2 ++ // to get the size of pod CIDR block per node. Example: ++ // max_pods_per_node of 30 would result in 64 IPs (/26). This config can ++ // disable the doubling of IPs (we still round off to next power of 2) ++ // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when ++ // overprovisioning is disabled. ++ PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` ++ + // ServicesIpv4Cidr: This field is deprecated, use + // services_ipv4_cidr_block. + ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` +@@ -2699,21 +2953,22 @@ type IPAllocationPolicy struct { + // false, then the server picks the default IP allocation mode + UseRoutes bool `json:"useRoutes,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "ClusterIpv4Cidr") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "ClusterIpv4Cidr") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. ++ // NullFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to include in API requests with the JSON ++ // null value. By default, fields with empty values are omitted from API ++ // requests. 
However, any field with an empty value appearing in ++ // NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. + NullFields []string `json:"-"` + } + +@@ -2834,6 +3089,34 @@ func (s *Jwk) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// K8sBetaAPIConfig: K8sBetaAPIConfig , configuration for beta APIs ++type K8sBetaAPIConfig struct { ++ // EnabledApis: Enabled k8s beta APIs. ++ EnabledApis []string `json:"enabledApis,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "EnabledApis") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "EnabledApis") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *K8sBetaAPIConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod K8sBetaAPIConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. + type KubernetesDashboard struct { + // Disabled: Whether the Kubernetes Dashboard is enabled for this +@@ -3696,6 +3979,10 @@ type NetworkConfig struct { + // DnsConfig: DNSConfig contains clusterDNS config for this cluster. + DnsConfig *DNSConfig `json:"dnsConfig,omitempty"` + ++ // EnableFqdnNetworkPolicy: Whether FQDN Network Policy is enabled on ++ // this cluster. ++ EnableFqdnNetworkPolicy bool `json:"enableFqdnNetworkPolicy,omitempty"` ++ + // EnableIntraNodeVisibility: Whether Intra-node visibility is enabled + // for this cluster. This makes same node pod to pod traffic visible for + // VPC network. +@@ -3895,6 +4182,47 @@ func (s *NetworkTags) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NodeAffinity: Specifies the NodeAffinity key, values, and affinity ++// operator according to shared sole tenant node group affinities ++// (https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity). ++type NodeAffinity struct { ++ // Key: Key for NodeAffinity. ++ Key string `json:"key,omitempty"` ++ ++ // Operator: Operator for NodeAffinity. ++ // ++ // Possible values: ++ // "OPERATOR_UNSPECIFIED" - Invalid or unspecified affinity operator. ++ // "IN" - Affinity operator. ++ // "NOT_IN" - Anti-affinity operator. ++ Operator string `json:"operator,omitempty"` ++ ++ // Values: Values for NodeAffinity. ++ Values []string `json:"values,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NodeAffinity) MarshalJSON() ([]byte, error) { ++ type NoMethod NodeAffinity ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NodeConfig: Parameters that describe the nodes in a cluster. GKE + // Autopilot clusters do not recognize parameters in `NodeConfig`. Use + // AutoprovisioningNodePoolDefaults instead. +@@ -4057,6 +4385,10 @@ type NodeConfig struct { + // ShieldedInstanceConfig: Shielded Instance options. + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + ++ // SoleTenantConfig: Parameters for node pools to be backed by shared ++ // sole tenant node groups. ++ SoleTenantConfig *SoleTenantConfig `json:"soleTenantConfig,omitempty"` ++ + // Spot: Spot flag for enabling Spot VM, which is a rebrand of the + // existing preemptible flag. + Spot bool `json:"spot,omitempty"` +@@ -4282,6 +4614,17 @@ type NodeNetworkConfig struct { + // NetworkPerformanceConfig: Network bandwidth tier configuration. + NetworkPerformanceConfig *NetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` + ++ // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size ++ // overprovisioning config for the nodepool. Pod CIDR size per node ++ // depends on max_pods_per_node. By default, the value of ++ // max_pods_per_node is rounded off to next power of 2 and we then ++ // double that to get the size of pod CIDR block per node. Example: ++ // max_pods_per_node of 30 would result in 64 IPs (/26). This config can ++ // disable the doubling of IPs (we still round off to next power of 2) ++ // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when ++ // overprovisioning is disabled. ++ PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` ++ + // PodIpv4CidrBlock: The IP address range for pod IPs in this node pool. + // Only applicable if `create_pod_range` is true. Set to blank to have a + // range chosen with the default size. Set to /netmask (e.g. `/14`) to +@@ -4335,6 +4678,9 @@ type NodePool struct { + // is enabled only if a valid configuration is present. + Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` + ++ // BestEffortProvisioning: Enable best effort provisioning for nodes ++ BestEffortProvisioning *BestEffortProvisioning `json:"bestEffortProvisioning,omitempty"` ++ + // Conditions: Which conditions caused the current node pool state. + Conditions []*StatusCondition `json:"conditions,omitempty"` + +@@ -4752,29 +5098,95 @@ type Operation struct { + // + // Possible values: + // "TYPE_UNSPECIFIED" - Not set. +- // "CREATE_CLUSTER" - Cluster create. +- // "DELETE_CLUSTER" - Cluster delete. +- // "UPGRADE_MASTER" - A master upgrade. +- // "UPGRADE_NODES" - A node upgrade. 
+- // "REPAIR_CLUSTER" - Cluster repair. +- // "UPDATE_CLUSTER" - Cluster update. +- // "CREATE_NODE_POOL" - Node pool create. +- // "DELETE_NODE_POOL" - Node pool delete. +- // "SET_NODE_POOL_MANAGEMENT" - Set node pool management. +- // "AUTO_REPAIR_NODES" - Automatic node pool repair. +- // "AUTO_UPGRADE_NODES" - Automatic node upgrade. +- // "SET_LABELS" - Set labels. +- // "SET_MASTER_AUTH" - Set/generate master auth materials +- // "SET_NODE_POOL_SIZE" - Set node pool size. +- // "SET_NETWORK_POLICY" - Updates network policy for a cluster. +- // "SET_MAINTENANCE_POLICY" - Set the maintenance policy. ++ // "CREATE_CLUSTER" - The cluster is being created. The cluster should ++ // be assumed to be unusable until the operation finishes. In the event ++ // of the operation failing, the cluster will enter the ERROR state and ++ // eventually be deleted. ++ // "DELETE_CLUSTER" - The cluster is being deleted. The cluster should ++ // be assumed to be unusable as soon as this operation starts. In the ++ // event of the operation failing, the cluster will enter the ERROR ++ // state and the deletion will be automatically retried until completed. ++ // "UPGRADE_MASTER" - The cluster version is being updated. Note that ++ // this includes "upgrades" to the same version, which are simply a ++ // recreation. This also includes ++ // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concep ++ // ts/cluster-upgrades#upgrading_automatically). For more details, see ++ // [documentation on cluster ++ // upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/clu ++ // ster-upgrades#cluster_upgrades). ++ // "UPGRADE_NODES" - A node pool is being updated. Despite calling ++ // this an "upgrade", this includes most forms of updates to node pools. ++ // This also includes ++ // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to ++ // /node-auto-upgrades). This operation sets the progress field and may ++ // be canceled. The upgrade strategy depends on [node pool ++ // configuration](https://cloud.google.com/kubernetes-engine/docs/concept ++ // s/node-pool-upgrade-strategies). The nodes are generally still usable ++ // during this operation. ++ // "REPAIR_CLUSTER" - A problem has been detected with the control ++ // plane and is being repaired. This operation type is initiated by GKE. ++ // For more details, see [documentation on ++ // repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/main ++ // tenance-windows-and-exclusions#repairs). ++ // "UPDATE_CLUSTER" - The cluster is being updated. This is a broad ++ // category of operations and includes operations that only change ++ // metadata as well as those that must recreate the entire cluster. If ++ // the control plane must be recreated, this will cause temporary ++ // downtime for zonal clusters. Some features require recreating the ++ // nodes as well. Those will be recreated as separate operations and the ++ // update may not be completely functional until the node pools ++ // recreations finish. Node recreations will generally follow ++ // [maintenance ++ // policies](https://cloud.google.com/kubernetes-engine/docs/concepts/mai ++ // ntenance-windows-and-exclusions). Some GKE-initiated operations use ++ // this type. This includes certain types of auto-upgrades and incident ++ // mitigations. ++ // "CREATE_NODE_POOL" - A node pool is being created. The node pool ++ // should be assumed to be unusable until this operation finishes. 
In ++ // the event of an error, the node pool may be partially created. If ++ // enabled, [node ++ // autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how- ++ // to/node-auto-provisioning) may have automatically initiated such ++ // operations. ++ // "DELETE_NODE_POOL" - The node pool is being deleted. The node pool ++ // should be assumed to be unusable as soon as this operation starts. ++ // "SET_NODE_POOL_MANAGEMENT" - The node pool's manamagent field is ++ // being updated. These operations only update metadata and may be ++ // concurrent with most other operations. ++ // "AUTO_REPAIR_NODES" - A problem has been detected with nodes and ++ // [they are being ++ // repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node- ++ // auto-repair). This operation type is initiated by GKE, typically ++ // automatically. This operation may be concurrent with other operations ++ // and there may be multiple repairs occurring on the same node pool. ++ // "AUTO_UPGRADE_NODES" - Unused. Automatic node upgrade uses ++ // UPGRADE_NODES. ++ // "SET_LABELS" - Unused. Updating labels uses UPDATE_CLUSTER. ++ // "SET_MASTER_AUTH" - Unused. Updating master auth uses ++ // UPDATE_CLUSTER. ++ // "SET_NODE_POOL_SIZE" - The node pool is being resized. With the ++ // exception of resizing to or from size zero, the node pool is ++ // generally usable during this operation. ++ // "SET_NETWORK_POLICY" - Unused. Updating network policy uses ++ // UPDATE_CLUSTER. ++ // "SET_MAINTENANCE_POLICY" - Unused. Updating maintenance policy uses ++ // UPDATE_CLUSTER. ++ // "RESIZE_CLUSTER" - The control plane is being resized. This ++ // operation type is initiated by GKE. These operations are often ++ // performed preemptively to ensure that the control plane has ++ // sufficient resources and is not typically an indication of issues. ++ // For more details, see [documentation on ++ // resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/main ++ // tenance-windows-and-exclusions#repairs). + OperationType string `json:"operationType,omitempty"` + + // Progress: Output only. [Output only] Progress information for an + // operation. + Progress *OperationProgress `json:"progress,omitempty"` + +- // SelfLink: Server-defined URL for the resource. ++ // SelfLink: Server-defined URI for the operation. Example: ++ // `https://container.googleapis.com/v1alpha1/projects/123/locations/us-c ++ // entral1/operations/operation-123`. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output only] The time the operation started, in RFC3339 +@@ -4795,7 +5207,17 @@ type Operation struct { + // description of the error. Deprecated. Use the field error instead. + StatusMessage string `json:"statusMessage,omitempty"` + +- // TargetLink: Server-defined URL for the target of the operation. ++ // TargetLink: Server-defined URI for the target of the operation. The ++ // format of this is a URI to the resource being modified (such as a ++ // cluster, node pool, or node). For node pool repairs, there may be ++ // multiple nodes being repaired, but only one will be the target. 
++ // Examples: - ## ++ // `https://container.googleapis.com/v1/projects/123/locations/us-central ++ // 1/clusters/my-cluster` ## ++ // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ ++ // clusters/my-cluster/nodePools/my-np` ++ // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ ++ // clusters/my-cluster/nodePools/my-np/node/my-node` + TargetLink string `json:"targetLink,omitempty"` + + // Zone: The name of the Google Compute Engine zone +@@ -4917,6 +5339,36 @@ func (s *PlacementPolicy) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// PodCIDROverprovisionConfig: [PRIVATE FIELD] Config for pod CIDR size ++// overprovisioning. ++type PodCIDROverprovisionConfig struct { ++ // Disable: Whether Pod CIDR overprovisioning is disabled. Note: Pod ++ // CIDR overprovisioning is enabled by default. ++ Disable bool `json:"disable,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Disable") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Disable") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *PodCIDROverprovisionConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod PodCIDROverprovisionConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // PrivateClusterConfig: Configuration options for private clusters. + type PrivateClusterConfig struct { + // EnablePrivateEndpoint: Whether the master's internal IP address is +@@ -6306,6 +6758,38 @@ func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// SoleTenantConfig: SoleTenantConfig contains the NodeAffinities to ++// specify what shared sole tenant node groups should back the node ++// pool. ++type SoleTenantConfig struct { ++ // NodeAffinities: NodeAffinities used to match to a shared sole tenant ++ // node group. ++ NodeAffinities []*NodeAffinity `json:"nodeAffinities,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NodeAffinities") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NodeAffinities") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. 
However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SoleTenantConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod SoleTenantConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // StandardRolloutPolicy: Standard rollout policy is the default policy + // for blue-green. + type StandardRolloutPolicy struct { +@@ -7153,8 +7637,8 @@ type UsableSubnetworkSecondaryRange struct { + // "UNUSED" - UNUSED denotes that this range is unclaimed by any + // cluster. + // "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is +- // claimed by a cluster for services. It cannot be used for other +- // clusters. ++ // claimed by cluster(s) for services. User-managed services range can ++ // be shared between clusters within the same subnetwork. + // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range + // was created by the network admin and is currently claimed by a + // cluster for pods. It can only be used by other clusters as a pod +@@ -7742,6 +8226,156 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) + + } + ++// method id "container.projects.locations.clusters.checkAutopilotCompatibility": ++ ++type ProjectsLocationsClustersCheckAutopilotCompatibilityCall struct { ++ s *Service ++ name string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// CheckAutopilotCompatibility: Checks the cluster compatibility with ++// Autopilot mode, and returns a list of compatibility issues. ++// ++// - name: The name (project, location, cluster) of the cluster to ++// retrieve. Specified in the format ++// `projects/*/locations/*/clusters/*`. ++func (r *ProjectsLocationsClustersService) CheckAutopilotCompatibility(name string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c := &ProjectsLocationsClustersCheckAutopilotCompatibilityCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.name = name ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Context(ctx context.Context) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:checkAutopilotCompatibility") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "name": c.name, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "container.projects.locations.clusters.checkAutopilotCompatibility" call. ++// Exactly one of *CheckAutopilotCompatibilityResponse or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *CheckAutopilotCompatibilityResponse.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Do(opts ...googleapi.CallOption) (*CheckAutopilotCompatibilityResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &CheckAutopilotCompatibilityResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", ++ // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", ++ // "httpMethod": "GET", ++ // "id": "container.projects.locations.clusters.checkAutopilotCompatibility", ++ // "parameterOrder": [ ++ // "name" ++ // ], ++ // "parameters": { ++ // "name": { ++ // "description": "The name (project, location, cluster) of the cluster to retrieve. 
Specified in the format `projects/*/locations/*/clusters/*`.", ++ // "location": "path", ++ // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "v1/{+name}:checkAutopilotCompatibility", ++ // "response": { ++ // "$ref": "CheckAutopilotCompatibilityResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform" ++ // ] ++ // } ++ ++} ++ + // method id "container.projects.locations.clusters.completeIpRotation": + + type ProjectsLocationsClustersCompleteIpRotationCall struct { +@@ -9138,6 +9772,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + // "httpMethod": "POST", +@@ -14247,6 +14882,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + // "httpMethod": "POST", +diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go +index b328a7976ab..b5e38c66282 100644 +--- a/vendor/google.golang.org/api/googleapi/googleapi.go ++++ b/vendor/google.golang.org/api/googleapi/googleapi.go +@@ -11,7 +11,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "strings" +@@ -144,7 +143,7 @@ func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } +- slurp, err := ioutil.ReadAll(res.Body) ++ slurp, err := io.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) +@@ -184,7 +183,7 @@ func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } +- slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) ++ slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) + return &Error{ + Code: res.StatusCode, + Body: string(slurp), +diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go +new file mode 100644 +index 00000000000..cecbb9ba115 +--- /dev/null ++++ b/vendor/google.golang.org/api/internal/cba.go +@@ -0,0 +1,282 @@ ++// Copyright 2020 Google LLC. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// cba.go (certificate-based access) contains utils for implementing Device Certificate ++// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials ++// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. ++// ++// The overall logic for DCA is as follows: ++// 1. If both endpoint override and client certificate are specified, use them as is. ++// 2. If user does not specify client certificate, we will attempt to use default ++// client certificate. 
++// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if ++// client certificate is available and defaultEndpoint otherwise. ++// ++// Implications of the above logic: ++// 1. If the user specifies a non-mTLS endpoint override but client certificate is ++// available, we will pass along the cert anyway and let the server decide what to do. ++// 2. If the user specifies an mTLS endpoint override but client certificate is not ++// available, we will not fail-fast, but let backend throw error when connecting. ++// ++// If running within Google's cloud environment, and client certificate is not specified ++// and not available through DCA, we will try mTLS with credentials held by ++// the Secure Session Agent, which is part of Google's cloud infrastructure. ++// ++// We would like to avoid introducing client-side logic that parses whether the ++// endpoint override is an mTLS url, since the url pattern may change at anytime. ++// ++// This package is not intended for use by end developers. Use the ++// google.golang.org/api/option package to configure API clients. ++ ++// Package internal supports the options and transport packages. ++package internal ++ ++import ( ++ "context" ++ "crypto/tls" ++ "net" ++ "net/url" ++ "os" ++ "strings" ++ ++ "github.com/google/s2a-go" ++ "github.com/google/s2a-go/fallback" ++ "google.golang.org/api/internal/cert" ++ "google.golang.org/grpc/credentials" ++) ++ ++const ( ++ mTLSModeAlways = "always" ++ mTLSModeNever = "never" ++ mTLSModeAuto = "auto" ++ ++ // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. ++ googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" ++) ++ ++// getClientCertificateSourceAndEndpoint is a convenience function that invokes ++// getClientCertificateSource and getEndpoint sequentially and returns the client ++// cert source and endpoint as a tuple. ++func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { ++ clientCertSource, err := getClientCertificateSource(settings) ++ if err != nil { ++ return nil, "", err ++ } ++ endpoint, err := getEndpoint(settings, clientCertSource) ++ if err != nil { ++ return nil, "", err ++ } ++ return clientCertSource, endpoint, nil ++} ++ ++type transportConfig struct { ++ clientCertSource cert.Source // The client certificate source. ++ endpoint string // The corresponding endpoint to use based on client certificate source. ++ s2aAddress string // The S2A address if it can be used, otherwise an empty string. ++ s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. ++} ++ ++func getTransportConfig(settings *DialSettings) (*transportConfig, error) { ++ clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) ++ if err != nil { ++ return &transportConfig{ ++ clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", ++ }, err ++ } ++ defaultTransportConfig := transportConfig{ ++ clientCertSource: clientCertSource, ++ endpoint: endpoint, ++ s2aAddress: "", ++ s2aMTLSEndpoint: "", ++ } ++ ++ // Check the env to determine whether to use S2A. ++ if !isGoogleS2AEnabled() { ++ return &defaultTransportConfig, nil ++ } ++ ++ // If client cert is found, use that over S2A. ++ // If MTLS is not enabled for the endpoint, skip S2A. ++ if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { ++ return &defaultTransportConfig, nil ++ } ++ s2aMTLSEndpoint := settings.DefaultMTLSEndpoint ++ // If there is endpoint override, honor it. 
++ if settings.Endpoint != "" { ++ s2aMTLSEndpoint = endpoint ++ } ++ s2aAddress := GetS2AAddress() ++ if s2aAddress == "" { ++ return &defaultTransportConfig, nil ++ } ++ return &transportConfig{ ++ clientCertSource: clientCertSource, ++ endpoint: endpoint, ++ s2aAddress: s2aAddress, ++ s2aMTLSEndpoint: s2aMTLSEndpoint, ++ }, nil ++} ++ ++func isGoogleS2AEnabled() bool { ++ return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" ++} ++ ++// getClientCertificateSource returns a default client certificate source, if ++// not provided by the user. ++// ++// A nil default source can be returned if the source does not exist. Any exceptions ++// encountered while initializing the default source will be reported as client ++// error (ex. corrupt metadata file). ++// ++// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE ++// must be set to "true" to allow certificate to be used (including user provided ++// certificates). For details, see AIP-4114. ++func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { ++ if !isClientCertificateEnabled() { ++ return nil, nil ++ } else if settings.ClientCertSource != nil { ++ return settings.ClientCertSource, nil ++ } else { ++ return cert.DefaultSource() ++ } ++} ++ ++func isClientCertificateEnabled() bool { ++ useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") ++ // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. ++ return strings.ToLower(useClientCert) == "true" ++} ++ ++// getEndpoint returns the endpoint for the service, taking into account the ++// user-provided endpoint override "settings.Endpoint". ++// ++// If no endpoint override is specified, we will either return the default endpoint or ++// the default mTLS endpoint if a client certificate is available. ++// ++// You can override the default endpoint choice (mtls vs. regular) by setting the ++// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. ++// ++// If the endpoint override is an address (host:port) rather than full base ++// URL (ex. https://...), then the user-provided address will be merged into ++// the default endpoint. For example, WithEndpoint("myhost:8000") and ++// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" ++func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { ++ if settings.Endpoint == "" { ++ mtlsMode := getMTLSMode() ++ if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { ++ return settings.DefaultMTLSEndpoint, nil ++ } ++ return settings.DefaultEndpoint, nil ++ } ++ if strings.Contains(settings.Endpoint, "://") { ++ // User passed in a full URL path, use it verbatim. ++ return settings.Endpoint, nil ++ } ++ if settings.DefaultEndpoint == "" { ++ // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. ++ // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. ++ return settings.Endpoint, nil ++ } ++ ++ // Assume user-provided endpoint is host[:port], merge it with the default endpoint. ++ return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) ++} ++ ++func getMTLSMode() string { ++ mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") ++ if mode == "" { ++ mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. 
++ } ++ if mode == "" { ++ return mTLSModeAuto ++ } ++ return strings.ToLower(mode) ++} ++ ++func mergeEndpoints(baseURL, newHost string) (string, error) { ++ u, err := url.Parse(fixScheme(baseURL)) ++ if err != nil { ++ return "", err ++ } ++ return strings.Replace(baseURL, u.Host, newHost, 1), nil ++} ++ ++func fixScheme(baseURL string) string { ++ if !strings.Contains(baseURL, "://") { ++ return "https://" + baseURL ++ } ++ return baseURL ++} ++ ++// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the ++// corresponding endpoint to use for GRPC client. ++func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { ++ config, err := getTransportConfig(settings) ++ if err != nil { ++ return nil, "", err ++ } ++ ++ defaultTransportCreds := credentials.NewTLS(&tls.Config{ ++ GetClientCertificate: config.clientCertSource, ++ }) ++ if config.s2aAddress == "" { ++ return defaultTransportCreds, config.endpoint, nil ++ } ++ ++ var fallbackOpts *s2a.FallbackOptions ++ // In case of S2A failure, fall back to the endpoint that would've been used without S2A. ++ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { ++ fallbackOpts = &s2a.FallbackOptions{ ++ FallbackClientHandshakeFunc: fallbackHandshake, ++ } ++ } ++ ++ s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ ++ S2AAddress: config.s2aAddress, ++ FallbackOpts: fallbackOpts, ++ }) ++ if err != nil { ++ // Use default if we cannot initialize S2A client transport credentials. ++ return defaultTransportCreds, config.endpoint, nil ++ } ++ return s2aTransportCreds, config.s2aMTLSEndpoint, nil ++} ++ ++// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, ++// and the endpoint to use for HTTP client. ++func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { ++ config, err := getTransportConfig(settings) ++ if err != nil { ++ return nil, nil, "", err ++ } ++ ++ if config.s2aAddress == "" { ++ return config.clientCertSource, nil, config.endpoint, nil ++ } ++ ++ var fallbackOpts *s2a.FallbackOptions ++ // In case of S2A failure, fall back to the endpoint that would've been used without S2A. ++ if fallbackURL, err := url.Parse(config.endpoint); err == nil { ++ if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { ++ fallbackOpts = &s2a.FallbackOptions{ ++ FallbackDialer: &s2a.FallbackDialer{ ++ Dialer: fallbackDialer, ++ ServerAddr: fallbackServerAddr, ++ }, ++ } ++ } ++ } ++ ++ dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ ++ S2AAddress: config.s2aAddress, ++ FallbackOpts: fallbackOpts, ++ }) ++ return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil ++} ++ ++// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. ++var mtlsEndpointEnabledForS2A = func() bool { ++ // TODO(xmenxk): determine this via discovery config. 
++ return true ++} +diff --git a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +index 5913cab8017..afd79ffe2be 100644 +--- a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go ++++ b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +@@ -18,7 +18,6 @@ import ( + "encoding/json" + "errors" + "fmt" +- "io/ioutil" + "os" + "os/exec" + "os/user" +@@ -59,7 +58,7 @@ func NewSecureConnectSource(configFilePath string) (Source, error) { + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + +- file, err := ioutil.ReadFile(configFilePath) ++ file, err := os.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. +diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go +index 63c66092203..92b3acf6edf 100644 +--- a/vendor/google.golang.org/api/internal/creds.go ++++ b/vendor/google.golang.org/api/internal/creds.go +@@ -10,7 +10,6 @@ import ( + "encoding/json" + "errors" + "fmt" +- "io/ioutil" + "net" + "net/http" + "os" +@@ -48,7 +47,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) + } + if ds.CredentialsFile != "" { +- data, err := ioutil.ReadFile(ds.CredentialsFile) ++ data, err := os.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } +@@ -92,7 +91,7 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. +- clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) ++ clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } +diff --git a/vendor/google.golang.org/api/internal/dca.go b/vendor/google.golang.org/api/internal/dca.go +deleted file mode 100644 +index 204a3fd2f3f..00000000000 +--- a/vendor/google.golang.org/api/internal/dca.go ++++ /dev/null +@@ -1,144 +0,0 @@ +-// Copyright 2020 Google LLC. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// Package dca contains utils for implementing Device Certificate +-// Authentication according to https://google.aip.dev/auth/4114 +-// +-// The overall logic for DCA is as follows: +-// 1. If both endpoint override and client certificate are specified, use them as is. +-// 2. If user does not specify client certificate, we will attempt to use default +-// client certificate. +-// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +-// client certificate is available and defaultEndpoint otherwise. +-// +-// Implications of the above logic: +-// 1. If the user specifies a non-mTLS endpoint override but client certificate is +-// available, we will pass along the cert anyway and let the server decide what to do. +-// 2. If the user specifies an mTLS endpoint override but client certificate is not +-// available, we will not fail-fast, but let backend throw error when connecting. 
+-// +-// We would like to avoid introducing client-side logic that parses whether the +-// endpoint override is an mTLS url, since the url pattern may change at anytime. +-// +-// This package is not intended for use by end developers. Use the +-// google.golang.org/api/option package to configure API clients. +- +-// Package internal supports the options and transport packages. +-package internal +- +-import ( +- "net/url" +- "os" +- "strings" +- +- "google.golang.org/api/internal/cert" +-) +- +-const ( +- mTLSModeAlways = "always" +- mTLSModeNever = "never" +- mTLSModeAuto = "auto" +-) +- +-// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +-// getClientCertificateSource and getEndpoint sequentially and returns the client +-// cert source and endpoint as a tuple. +-func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { +- clientCertSource, err := getClientCertificateSource(settings) +- if err != nil { +- return nil, "", err +- } +- endpoint, err := getEndpoint(settings, clientCertSource) +- if err != nil { +- return nil, "", err +- } +- return clientCertSource, endpoint, nil +-} +- +-// getClientCertificateSource returns a default client certificate source, if +-// not provided by the user. +-// +-// A nil default source can be returned if the source does not exist. Any exceptions +-// encountered while initializing the default source will be reported as client +-// error (ex. corrupt metadata file). +-// +-// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +-// must be set to "true" to allow certificate to be used (including user provided +-// certificates). For details, see AIP-4114. +-func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { +- if !isClientCertificateEnabled() { +- return nil, nil +- } else if settings.ClientCertSource != nil { +- return settings.ClientCertSource, nil +- } else { +- return cert.DefaultSource() +- } +-} +- +-func isClientCertificateEnabled() bool { +- useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") +- // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. +- return strings.ToLower(useClientCert) == "true" +-} +- +-// getEndpoint returns the endpoint for the service, taking into account the +-// user-provided endpoint override "settings.Endpoint". +-// +-// If no endpoint override is specified, we will either return the default endpoint or +-// the default mTLS endpoint if a client certificate is available. +-// +-// You can override the default endpoint choice (mtls vs. regular) by setting the +-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +-// +-// If the endpoint override is an address (host:port) rather than full base +-// URL (ex. https://...), then the user-provided address will be merged into +-// the default endpoint. For example, WithEndpoint("myhost:8000") and +-// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +-func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { +- if settings.Endpoint == "" { +- mtlsMode := getMTLSMode() +- if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { +- return settings.DefaultMTLSEndpoint, nil +- } +- return settings.DefaultEndpoint, nil +- } +- if strings.Contains(settings.Endpoint, "://") { +- // User passed in a full URL path, use it verbatim. 
+- return settings.Endpoint, nil +- } +- if settings.DefaultEndpoint == "" { +- // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. +- // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. +- return settings.Endpoint, nil +- } +- +- // Assume user-provided endpoint is host[:port], merge it with the default endpoint. +- return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +-} +- +-func getMTLSMode() string { +- mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") +- if mode == "" { +- mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. +- } +- if mode == "" { +- return mTLSModeAuto +- } +- return strings.ToLower(mode) +-} +- +-func mergeEndpoints(baseURL, newHost string) (string, error) { +- u, err := url.Parse(fixScheme(baseURL)) +- if err != nil { +- return "", err +- } +- return strings.Replace(baseURL, u.Host, newHost, 1), nil +-} +- +-func fixScheme(baseURL string) string { +- if !strings.Contains(baseURL, "://") { +- return "https://" + baseURL +- } +- return baseURL +-} +diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go +index 8356e7f27b0..c048a57084b 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/media.go ++++ b/vendor/google.golang.org/api/internal/gensupport/media.go +@@ -8,7 +8,6 @@ import ( + "bytes" + "fmt" + "io" +- "io/ioutil" + "mime" + "mime/multipart" + "net/http" +@@ -222,8 +221,8 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB + toCleanup = append(toCleanup, combined) + if fb != nil && fm != nil { + getBody = func() (io.ReadCloser, error) { +- rb := ioutil.NopCloser(fb()) +- rm := ioutil.NopCloser(fm()) ++ rb := io.NopCloser(fb()) ++ rm := io.NopCloser(fm()) + var mimeBoundary string + if _, params, err := mime.ParseMediaType(ctype); err == nil { + mimeBoundary = params["boundary"] +@@ -243,7 +242,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { +- rb := ioutil.NopCloser(fb()) ++ rb := io.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } +diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go +index f168ea6d2b7..08e7aacefb6 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/resumable.go ++++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go +@@ -43,8 +43,8 @@ type ResumableUpload struct { + // retries should happen. + ChunkRetryDeadline time.Duration + +- // Track current request invocation ID and attempt count for retry metric +- // headers. ++ // Track current request invocation ID and attempt count for retry metrics ++ // and idempotency headers. + invocationID string + attempts int + } +@@ -81,10 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + ++ // TODO(b/274504690): Consider dropping gccl-invocation-id key since it ++ // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). 
+ baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + ++ // Set idempotency token header which is used by GCS uploads. ++ req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) ++ + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in +diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go +index 85c7bcbfdfc..693a1b1abaf 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/send.go ++++ b/vendor/google.golang.org/api/internal/gensupport/send.go +@@ -138,9 +138,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r + } + return resp, ctx.Err() + } ++ ++ // Set retry metrics and idempotency headers for GCS. ++ // TODO(b/274504690): Consider dropping gccl-invocation-id key since it ++ // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) ++ req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) + + resp, err = client.Do(req.WithContext(ctx)) + +diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go +index b465bbcd12e..4b2c775f210 100644 +--- a/vendor/google.golang.org/api/internal/impersonate/impersonate.go ++++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go +@@ -11,7 +11,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "time" + +@@ -105,7 +104,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() +- body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) ++ body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } +diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go +new file mode 100644 +index 00000000000..c5b421f5544 +--- /dev/null ++++ b/vendor/google.golang.org/api/internal/s2a.go +@@ -0,0 +1,136 @@ ++// Copyright 2023 Google LLC. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++import ( ++ "encoding/json" ++ "log" ++ "sync" ++ "time" ++ ++ "cloud.google.com/go/compute/metadata" ++) ++ ++const configEndpointSuffix = "googleAutoMtlsConfiguration" ++ ++// The period an MTLS config can be reused before needing refresh. ++var configExpiry = time.Hour ++ ++// GetS2AAddress returns the S2A address to be reached via plaintext connection. 
++func GetS2AAddress() string { ++ c, err := getMetadataMTLSAutoConfig().Config() ++ if err != nil { ++ return "" ++ } ++ if !c.Valid() { ++ return "" ++ } ++ return c.S2A.PlaintextAddress ++} ++ ++type mtlsConfigSource interface { ++ Config() (*mtlsConfig, error) ++} ++ ++// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. ++var ( ++ mdsMTLSAutoConfigSource mtlsConfigSource ++ once sync.Once ++) ++ ++// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. ++func getMetadataMTLSAutoConfig() mtlsConfigSource { ++ once.Do(func() { ++ mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ ++ src: &metadataMTLSAutoConfig{}, ++ } ++ }) ++ return mdsMTLSAutoConfigSource ++} ++ ++// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. ++// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. ++type reuseMTLSConfigSource struct { ++ src mtlsConfigSource // src.Config() is called when config is expired ++ mu sync.Mutex // mutex guards config ++ config *mtlsConfig // cached config ++} ++ ++func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { ++ cs.mu.Lock() ++ defer cs.mu.Unlock() ++ ++ if cs.config.Valid() { ++ return cs.config, nil ++ } ++ c, err := cs.src.Config() ++ if err != nil { ++ return nil, err ++ } ++ cs.config = c ++ return c, nil ++} ++ ++// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource ++// It has the logic to query MDS and return an mtlsConfig ++type metadataMTLSAutoConfig struct{} ++ ++var httpGetMetadataMTLSConfig = func() (string, error) { ++ return metadata.Get(configEndpointSuffix) ++} ++ ++func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { ++ resp, err := httpGetMetadataMTLSConfig() ++ if err != nil { ++ log.Printf("querying MTLS config from MDS endpoint failed: %v", err) ++ return defaultMTLSConfig(), nil ++ } ++ var config mtlsConfig ++ err = json.Unmarshal([]byte(resp), &config) ++ if err != nil { ++ log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) ++ return defaultMTLSConfig(), nil ++ } ++ ++ if config.S2A == nil { ++ log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) ++ return defaultMTLSConfig(), nil ++ } ++ ++ // set new expiry ++ config.Expiry = time.Now().Add(configExpiry) ++ return &config, nil ++} ++ ++func defaultMTLSConfig() *mtlsConfig { ++ return &mtlsConfig{ ++ S2A: &s2aAddresses{ ++ PlaintextAddress: "", ++ MTLSAddress: "", ++ }, ++ Expiry: time.Now().Add(configExpiry), ++ } ++} ++ ++// s2aAddresses contains the plaintext and/or MTLS S2A addresses. ++type s2aAddresses struct { ++ // PlaintextAddress is the plaintext address to reach S2A ++ PlaintextAddress string `json:"plaintext_address"` ++ // MTLSAddress is the MTLS address to reach S2A ++ MTLSAddress string `json:"mtls_address"` ++} ++ ++// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. 
++type mtlsConfig struct { ++ S2A *s2aAddresses `json:"s2a"` ++ Expiry time.Time ++} ++ ++func (c *mtlsConfig) Valid() bool { ++ return c != nil && c.S2A != nil && !c.expired() ++} ++func (c *mtlsConfig) expired() bool { ++ return c.Expiry.Before(time.Now()) ++} +diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go +index 76efdb22772..3a3874df112 100644 +--- a/vendor/google.golang.org/api/internal/settings.go ++++ b/vendor/google.golang.org/api/internal/settings.go +@@ -46,6 +46,7 @@ type DialSettings struct { + SkipValidation bool + ImpersonationConfig *impersonate.Config + EnableDirectPath bool ++ EnableDirectPathXds bool + AllowNonDefaultServiceAccount bool + + // Google API system parameters. For more information please read: +diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go +index 7a4f6d8982e..46ad187ec11 100644 +--- a/vendor/google.golang.org/api/internal/version.go ++++ b/vendor/google.golang.org/api/internal/version.go +@@ -5,4 +5,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "0.114.0" ++const Version = "0.126.0" +diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +index 6490d4738f4..6a1aab9daf8 100644 +--- a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json ++++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +@@ -1217,7 +1217,7 @@ + "type": "string" + }, + "pageSize": { +- "description": "A positive number that is the maximum number of results to return.", ++ "description": "A positive number that is the maximum number of results to return. The default and maximum value is 10,000. If a page_size \u003c= 0 or \u003e 10,000 is submitted, will instead return a maximum of 10,000 results.", + "format": "int32", + "location": "query", + "type": "integer" +@@ -1505,7 +1505,7 @@ + ] + }, + "list": { +- "description": "Lists the notification channels that have been created for the project.", ++ "description": "Lists the notification channels that have been created for the project. To list the types of notification channels that are supported, use the ListNotificationChannelDescriptors method.", + "flatPath": "v3/projects/{projectsId}/notificationChannels", + "httpMethod": "GET", + "id": "monitoring.projects.notificationChannels.list", +@@ -2714,7 +2714,7 @@ + } + } + }, +- "revision": "20230212", ++ "revision": "20230531", + "rootUrl": "https://monitoring.googleapis.com/", + "schemas": { + "Aggregation": { +@@ -2843,7 +2843,7 @@ + "type": "string" + }, + "conditions": { +- "description": "A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.", ++ "description": "A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. 
If condition_monitoring_query_language is present, it must be the only condition.", + "items": { + "$ref": "Condition" + }, +@@ -2854,7 +2854,7 @@ + "description": "A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored." + }, + "displayName": { +- "description": "A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.", ++ "description": "A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.The convention for the display_name of a PrometheusQueryLanguageCondition is \"/\", where the and should be taken from the corresponding Prometheus configuration file. This convention is not enforced. In any case the display_name is not a unique key of the AlertPolicy.", + "type": "string" + }, + "documentation": { +@@ -2884,12 +2884,12 @@ + "additionalProperties": { + "type": "string" + }, +- "description": "User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.", ++ "description": "User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.Note that Prometheus and are valid Prometheus label names (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). This means that they cannot be stored as is in user labels, because Prometheus labels may contain upper-case letters.", + "type": "object" + }, + "validity": { + "$ref": "Status", +- "description": "Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents." ++ "description": "Read-only description of how the alert policy is invalid. This field is only set when the alert policy is invalid. An invalid alert policy will not generate incidents." + } + }, + "type": "object" +@@ -2903,6 +2903,13 @@ + "format": "google-duration", + "type": "string" + }, ++ "notificationChannelStrategy": { ++ "description": "Control how notifications will be sent out, on a per-channel basis.", ++ "items": { ++ "$ref": "NotificationChannelStrategy" ++ }, ++ "type": "array" ++ }, + "notificationRateLimit": { + "$ref": "NotificationRateLimit", + "description": "Required for alert policies with a LogMatch condition.This limit is not implemented for alert policies that are not log-based." +@@ -3504,7 +3511,7 @@ + "type": "object" + }, + "Exponential": { +- "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. 
Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", ++ "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i).Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "id": "Exponential", + "properties": { + "growthFactor": { +@@ -4033,7 +4040,7 @@ + "type": "object" + }, + "Linear": { +- "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", ++ "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i).Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "id": "Linear", + "properties": { + "numFiniteBuckets": { +@@ -4932,6 +4939,25 @@ + }, + "type": "object" + }, ++ "NotificationChannelStrategy": { ++ "description": "Control over how the notification channels in notification_channels are notified when this alert fires, on a per-channel basis.", ++ "id": "NotificationChannelStrategy", ++ "properties": { ++ "notificationChannelNames": { ++ "description": "The full REST resource name for the notification channels that these settings apply to. Each of these correspond to the name field in one of the NotificationChannel objects referenced in the notification_channels field of this AlertPolicy. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "renotifyInterval": { ++ "description": "The frequency at which to send reminder notifications for open incidents.", ++ "format": "google-duration", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "NotificationRateLimit": { + "description": "Control over the rate of notifications sent to this alert policy's notification channels.", + "id": "NotificationRateLimit", +@@ -5624,6 +5650,10 @@ + "description": "A protocol buffer message type.", + "id": "Type", + "properties": { ++ "edition": { ++ "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", ++ "type": "string" ++ }, + "fields": { + "description": "The list of fields.", + "items": { +@@ -5657,11 +5687,13 @@ + "description": "The source syntax.", + "enum": [ + "SYNTAX_PROTO2", +- "SYNTAX_PROTO3" ++ "SYNTAX_PROTO3", ++ "SYNTAX_EDITIONS" + ], + "enumDescriptions": [ + "Syntax proto2.", +- "Syntax proto3." ++ "Syntax proto3.", ++ "Syntax editions." 
+ ], + "type": "string" + } +diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +index 6f700c3458a..0c5038567a4 100644 +--- a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go ++++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +@@ -77,6 +77,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "monitoring:v3" + const apiName = "monitoring" +@@ -688,7 +689,8 @@ type AlertPolicy struct { + // combined conditions evaluate to true, then an incident is created. A + // policy can have from one to six conditions. If + // condition_time_series_query_language is present, it must be the only +- // condition. ++ // condition. If condition_monitoring_query_language is present, it must ++ // be the only condition. + Conditions []*Condition `json:"conditions,omitempty"` + + // CreationRecord: A read-only record of the creation of the alerting +@@ -699,7 +701,11 @@ type AlertPolicy struct { + // DisplayName: A short name or phrase used to identify the policy in + // dashboards, notifications, and incidents. To avoid confusion, don't + // use the same display name for multiple policies in the same project. +- // The name is limited to 512 Unicode characters. ++ // The name is limited to 512 Unicode characters.The convention for the ++ // display_name of a PrometheusQueryLanguageCondition is "/", where the ++ // and should be taken from the corresponding Prometheus configuration ++ // file. This convention is not enforced. In any case the display_name ++ // is not a unique key of the AlertPolicy. + DisplayName string `json:"displayName,omitempty"` + + // Documentation: Documentation that is included with notifications and +@@ -746,12 +752,16 @@ type AlertPolicy struct { + // 64 entries. Each key and value is limited to 63 Unicode characters or + // 128 bytes, whichever is smaller. Labels and values can contain only + // lowercase letters, numerals, underscores, and dashes. Keys must begin +- // with a letter. ++ // with a letter.Note that Prometheus and are valid Prometheus label ++ // names ++ // (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). ++ // This means that they cannot be stored as is in user labels, because ++ // Prometheus labels may contain upper-case letters. + UserLabels map[string]string `json:"userLabels,omitempty"` + + // Validity: Read-only description of how the alert policy is invalid. +- // OK if the alert policy is valid. If not OK, the alert policy will not +- // generate incidents. ++ // This field is only set when the alert policy is invalid. An invalid ++ // alert policy will not generate incidents. + Validity *Status `json:"validity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the +@@ -788,6 +798,10 @@ type AlertStrategy struct { + // long, any open incidents will close + AutoClose string `json:"autoClose,omitempty"` + ++ // NotificationChannelStrategy: Control how notifications will be sent ++ // out, on a per-channel basis. ++ NotificationChannelStrategy []*NotificationChannelStrategy `json:"notificationChannelStrategy,omitempty"` ++ + // NotificationRateLimit: Required for alert policies with a LogMatch + // condition.This limit is not implemented for alert policies that are + // not log-based. 
+@@ -2004,12 +2018,29 @@ func (s *Explicit) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++func (s *Explicit) UnmarshalJSON(data []byte) error { ++ type NoMethod Explicit ++ var s1 struct { ++ Bounds []gensupport.JSONFloat64 `json:"bounds"` ++ *NoMethod ++ } ++ s1.NoMethod = (*NoMethod)(s) ++ if err := json.Unmarshal(data, &s1); err != nil { ++ return err ++ } ++ s.Bounds = make([]float64, len(s1.Bounds)) ++ for i := range s1.Bounds { ++ s.Bounds[i] = float64(s1.Bounds[i]) ++ } ++ return nil ++} ++ + // Exponential: Specifies an exponential sequence of buckets that have a + // width that is proportional to the value of the lower bound. Each + // bucket represents a constant relative uncertainty on a specific value + // in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket + // i has the following boundaries:Upper bound (0 <= i < N-1): scale * +-// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor ++// (growth_factor ^ i).Lower bound (1 <= i < N): scale * (growth_factor + // ^ (i - 1)). + type Exponential struct { + // GrowthFactor: Must be greater than 1. +@@ -2922,7 +2953,7 @@ func (s *LatencyCriteria) MarshalJSON() ([]byte, error) { + // constant absolute uncertainty on the specific value in the + // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has + // the following boundaries:Upper bound (0 <= i < N-1): offset + (width +-// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). ++// * i).Lower bound (1 <= i < N): offset + (width * (i - 1)). + type Linear struct { + // NumFiniteBuckets: Must be greater than 0. + NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` +@@ -4679,6 +4710,47 @@ func (s *NotificationChannelDescriptor) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NotificationChannelStrategy: Control over how the notification ++// channels in notification_channels are notified when this alert fires, ++// on a per-channel basis. ++type NotificationChannelStrategy struct { ++ // NotificationChannelNames: The full REST resource name for the ++ // notification channels that these settings apply to. Each of these ++ // correspond to the name field in one of the NotificationChannel ++ // objects referenced in the notification_channels field of this ++ // AlertPolicy. The format is: ++ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ++ NotificationChannelNames []string `json:"notificationChannelNames,omitempty"` ++ ++ // RenotifyInterval: The frequency at which to send reminder ++ // notifications for open incidents. ++ RenotifyInterval string `json:"renotifyInterval,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "NotificationChannelNames") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NotificationChannelNames") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. 
However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NotificationChannelStrategy) MarshalJSON() ([]byte, error) { ++ type NoMethod NotificationChannelStrategy ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NotificationRateLimit: Control over the rate of notifications sent to + // this alert policy's notification channels. + type NotificationRateLimit struct { +@@ -6003,6 +6075,10 @@ func (s *Trigger) UnmarshalJSON(data []byte) error { + + // Type: A protocol buffer message type. + type Type struct { ++ // Edition: The source edition string, only valid when syntax is ++ // SYNTAX_EDITIONS. ++ Edition string `json:"edition,omitempty"` ++ + // Fields: The list of fields. + Fields []*Field `json:"fields,omitempty"` + +@@ -6024,9 +6100,10 @@ type Type struct { + // Possible values: + // "SYNTAX_PROTO2" - Syntax proto2. + // "SYNTAX_PROTO3" - Syntax proto3. ++ // "SYNTAX_EDITIONS" - Syntax editions. + Syntax string `json:"syntax,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Fields") to ++ // ForceSendFields is a list of field names (e.g. "Edition") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -6034,8 +6111,8 @@ type Type struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Fields") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Edition") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -11235,7 +11312,9 @@ func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetri + } + + // PageSize sets the optional parameter "pageSize": A positive number +-// that is the maximum number of results to return. ++// that is the maximum number of results to return. The default and ++// maximum value is 10,000. If a page_size <= 0 or > 10,000 is ++// submitted, will instead return a maximum of 10,000 results. + func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +@@ -11370,7 +11449,7 @@ func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*L + // "type": "string" + // }, + // "pageSize": { +- // "description": "A positive number that is the maximum number of results to return.", ++ // "description": "A positive number that is the maximum number of results to return. The default and maximum value is 10,000. 
If a page_size \u003c= 0 or \u003e 10,000 is submitted, will instead return a maximum of 10,000 results.", + // "format": "int32", + // "location": "query", + // "type": "integer" +@@ -12787,7 +12866,8 @@ type ProjectsNotificationChannelsListCall struct { + } + + // List: Lists the notification channels that have been created for the +-// project. ++// project. To list the types of notification channels that are ++// supported, use the ListNotificationChannelDescriptors method. + // + // - name: The project + // (https://cloud.google.com/monitoring/api/v3#project_name) on which +@@ -12938,7 +13018,7 @@ func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Lists the notification channels that have been created for the project.", ++ // "description": "Lists the notification channels that have been created for the project. To list the types of notification channels that are supported, use the ListNotificationChannelDescriptors method.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels", + // "httpMethod": "GET", + // "id": "monitoring.projects.notificationChannels.list", +diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go +index cc7ebfe277b..3b8461d1da9 100644 +--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go ++++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go +@@ -67,6 +67,21 @@ func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) + } + ++// EnableDirectPathXds returns a ClientOption that overrides the default ++// DirectPath type. It is only valid when DirectPath is enabled. ++// ++// It should only be used internally by generated clients. ++// This is an EXPERIMENTAL API and may be changed or removed in the future. ++func EnableDirectPathXds() option.ClientOption { ++ return enableDirectPathXds(true) ++} ++ ++type enableDirectPathXds bool ++ ++func (x enableDirectPathXds) Apply(o *internal.DialSettings) { ++ o.EnableDirectPathXds = bool(x) ++} ++ + // AllowNonDefaultServiceAccount returns a ClientOption that overrides the default + // requirement for using the default service account for DirectPath. + // +diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-api.json b/vendor/google.golang.org/api/tpu/v1/tpu-api.json +index 3dab07c96aa..820cbf9681d 100644 +--- a/vendor/google.golang.org/api/tpu/v1/tpu-api.json ++++ b/vendor/google.golang.org/api/tpu/v1/tpu-api.json +@@ -537,7 +537,7 @@ + ] + }, + "list": { +- "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", ++ "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "tpu.projects.locations.operations.list", +@@ -659,7 +659,7 @@ + } + } + }, +- "revision": "20220725", ++ "revision": "20230420", + "rootUrl": "https://tpu.googleapis.com/", + "schemas": { + "AcceleratorType": { +@@ -795,7 +795,7 @@ + "type": "object" + }, + "Location": { +- "description": "A resource that represents Google Cloud Platform location.", ++ "description": "A resource that represents a Google Cloud location.", + "id": "Location", + "properties": { + "displayName": { +diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +index 12b03349403..733464667d2 100644 +--- a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go ++++ b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +@@ -71,6 +71,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "tpu:v1" + const apiName = "tpu" +@@ -442,7 +443,7 @@ func (s *ListTensorFlowVersionsResponse) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// Location: A resource that represents Google Cloud Platform location. ++// Location: A resource that represents a Google Cloud location. + type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". +@@ -3186,14 +3187,7 @@ type ProjectsLocationsOperationsListCall struct { + + // List: Lists operations that match the specified filter in the + // request. If the server doesn't support this method, it returns +-// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +-// override the binding to use different resource name schemes, such as +-// `users/*/operations`. To override the binding, API services can add a +-// binding such as "/v1/{name=users/*}/operations" to their service +-// configuration. For backwards compatibility, the default name includes +-// the operations collection id, however overriding users must ensure +-// the name binding is the parent resource, without the operations +-// collection id. ++// `UNIMPLEMENTED`. + // + // - name: The name of the operation's parent resource. + func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { +@@ -3322,7 +3316,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( + } + return ret, nil + // { +- // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", ++ // "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + // "httpMethod": "GET", + // "id": "tpu.projects.locations.operations.list", +diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go +index 403509d08f6..eca0c3ba795 100644 +--- a/vendor/google.golang.org/api/transport/http/dial.go ++++ b/vendor/google.golang.org/api/transport/http/dial.go +@@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, + if err != nil { + return nil, "", err + } +- clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) ++ clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) + if err != nil { + return nil, "", err + } +@@ -41,7 +41,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, + if settings.HTTPClient != nil { + return settings.HTTPClient, endpoint, nil + } +- trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) ++ ++ trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) + if err != nil { + return nil, "", err + } +@@ -152,7 +153,7 @@ var appengineUrlfetchHook func(context.Context) http.RoundTripper + // Otherwise, use a default transport, taking most defaults from + // http.DefaultTransport. + // If TLSCertificate is available, set TLSClientConfig as well. +-func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { ++func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } +@@ -171,6 +172,10 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt + GetClientCertificate: clientCertSource, + } + } ++ if dialTLSContext != nil { ++ // If DialTLSContext is set, TLSClientConfig wil be ignored ++ trans.DialTLSContext = dialTLSContext ++ } + + configureHTTP2(trans) + +diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +new file mode 100644 +index 00000000000..4ec872e4606 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +@@ -0,0 +1,2822 @@ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// source: google.golang.org/appengine/internal/socket/socket_service.proto ++ ++package socket ++ ++import proto "github.com/golang/protobuf/proto" ++import fmt "fmt" ++import math "math" ++ ++// Reference imports to suppress errors if they are not otherwise used. ++var _ = proto.Marshal ++var _ = fmt.Errorf ++var _ = math.Inf ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the proto package it is being compiled against. ++// A compilation error at this line likely means your copy of the ++// proto package needs to be updated. 
++const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package ++ ++type RemoteSocketServiceError_ErrorCode int32 ++ ++const ( ++ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 ++ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 ++ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 ++ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 ++ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 ++ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 ++) ++ ++var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ ++ 1: "SYSTEM_ERROR", ++ 2: "GAI_ERROR", ++ 4: "FAILURE", ++ 5: "PERMISSION_DENIED", ++ 6: "INVALID_REQUEST", ++ 7: "SOCKET_CLOSED", ++} ++var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ ++ "SYSTEM_ERROR": 1, ++ "GAI_ERROR": 2, ++ "FAILURE": 4, ++ "PERMISSION_DENIED": 5, ++ "INVALID_REQUEST": 6, ++ "SOCKET_CLOSED": 7, ++} ++ ++func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { ++ p := new(RemoteSocketServiceError_ErrorCode) ++ *p = x ++ return p ++} ++func (x RemoteSocketServiceError_ErrorCode) String() string { ++ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) ++} ++func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") ++ if err != nil { ++ return err ++ } ++ *x = RemoteSocketServiceError_ErrorCode(value) ++ return nil ++} ++func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} ++} ++ ++type RemoteSocketServiceError_SystemError int32 ++ ++const ( ++ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 ++ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 ++ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 ++ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 ++ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 ++ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 ++ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 ++ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 ++ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 ++ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 ++ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 ++ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 ++ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 ++ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 ++ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 ++ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 ++ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 ++ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 ++ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 ++ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 ++ RemoteSocketServiceError_SYS_ENODEV 
RemoteSocketServiceError_SystemError = 19 ++ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 ++ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 ++ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 ++ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 ++ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 ++ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 ++ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 ++ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 ++ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 ++ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 ++ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 ++ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 ++ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 ++ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 ++ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 ++ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 ++ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 ++ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 ++ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 ++ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 ++ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 ++ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 ++ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 ++ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 ++ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 ++ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 ++ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 ++ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 ++ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 ++ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 ++ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 ++ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 ++ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 ++ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 ++ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 ++ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 ++ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 ++ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 ++ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 ++ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 ++ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 ++ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 ++ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 ++ 
RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 ++ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 ++ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 ++ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 ++ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 ++ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 ++ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 ++ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 ++ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 ++ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 ++ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 ++ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 ++ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 ++ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 ++ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 ++ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 ++ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 ++ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 ++ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 ++ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 ++ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 ++ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 ++ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 ++ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 ++ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 ++ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 ++ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 ++ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 ++ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 ++ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 ++ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 ++ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 ++ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 ++ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 ++ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 ++ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 ++ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 ++ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 ++ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 ++ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 ++ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 ++ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 ++ RemoteSocketServiceError_SYS_ENOBUFS 
RemoteSocketServiceError_SystemError = 105 ++ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 ++ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 ++ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 ++ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 ++ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 ++ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 ++ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 ++ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 ++ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 ++ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 ++ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 ++ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 ++ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 ++ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 ++ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 ++ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 ++ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 ++ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 ++ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 ++ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 ++ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 ++ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 ++ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 ++ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 ++ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 ++ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 ++ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 ++) ++ ++var RemoteSocketServiceError_SystemError_name = map[int32]string{ ++ 0: "SYS_SUCCESS", ++ 1: "SYS_EPERM", ++ 2: "SYS_ENOENT", ++ 3: "SYS_ESRCH", ++ 4: "SYS_EINTR", ++ 5: "SYS_EIO", ++ 6: "SYS_ENXIO", ++ 7: "SYS_E2BIG", ++ 8: "SYS_ENOEXEC", ++ 9: "SYS_EBADF", ++ 10: "SYS_ECHILD", ++ 11: "SYS_EAGAIN", ++ // Duplicate value: 11: "SYS_EWOULDBLOCK", ++ 12: "SYS_ENOMEM", ++ 13: "SYS_EACCES", ++ 14: "SYS_EFAULT", ++ 15: "SYS_ENOTBLK", ++ 16: "SYS_EBUSY", ++ 17: "SYS_EEXIST", ++ 18: "SYS_EXDEV", ++ 19: "SYS_ENODEV", ++ 20: "SYS_ENOTDIR", ++ 21: "SYS_EISDIR", ++ 22: "SYS_EINVAL", ++ 23: "SYS_ENFILE", ++ 24: "SYS_EMFILE", ++ 25: "SYS_ENOTTY", ++ 26: "SYS_ETXTBSY", ++ 27: "SYS_EFBIG", ++ 28: "SYS_ENOSPC", ++ 29: "SYS_ESPIPE", ++ 30: "SYS_EROFS", ++ 31: "SYS_EMLINK", ++ 32: "SYS_EPIPE", ++ 33: "SYS_EDOM", ++ 34: "SYS_ERANGE", ++ 35: "SYS_EDEADLK", ++ // Duplicate value: 35: "SYS_EDEADLOCK", ++ 36: "SYS_ENAMETOOLONG", ++ 37: "SYS_ENOLCK", ++ 38: "SYS_ENOSYS", ++ 39: "SYS_ENOTEMPTY", ++ 40: "SYS_ELOOP", ++ 42: "SYS_ENOMSG", ++ 43: "SYS_EIDRM", ++ 44: "SYS_ECHRNG", ++ 45: "SYS_EL2NSYNC", ++ 46: "SYS_EL3HLT", ++ 47: "SYS_EL3RST", ++ 48: "SYS_ELNRNG", ++ 49: "SYS_EUNATCH", ++ 50: "SYS_ENOCSI", ++ 51: 
"SYS_EL2HLT", ++ 52: "SYS_EBADE", ++ 53: "SYS_EBADR", ++ 54: "SYS_EXFULL", ++ 55: "SYS_ENOANO", ++ 56: "SYS_EBADRQC", ++ 57: "SYS_EBADSLT", ++ 59: "SYS_EBFONT", ++ 60: "SYS_ENOSTR", ++ 61: "SYS_ENODATA", ++ 62: "SYS_ETIME", ++ 63: "SYS_ENOSR", ++ 64: "SYS_ENONET", ++ 65: "SYS_ENOPKG", ++ 66: "SYS_EREMOTE", ++ 67: "SYS_ENOLINK", ++ 68: "SYS_EADV", ++ 69: "SYS_ESRMNT", ++ 70: "SYS_ECOMM", ++ 71: "SYS_EPROTO", ++ 72: "SYS_EMULTIHOP", ++ 73: "SYS_EDOTDOT", ++ 74: "SYS_EBADMSG", ++ 75: "SYS_EOVERFLOW", ++ 76: "SYS_ENOTUNIQ", ++ 77: "SYS_EBADFD", ++ 78: "SYS_EREMCHG", ++ 79: "SYS_ELIBACC", ++ 80: "SYS_ELIBBAD", ++ 81: "SYS_ELIBSCN", ++ 82: "SYS_ELIBMAX", ++ 83: "SYS_ELIBEXEC", ++ 84: "SYS_EILSEQ", ++ 85: "SYS_ERESTART", ++ 86: "SYS_ESTRPIPE", ++ 87: "SYS_EUSERS", ++ 88: "SYS_ENOTSOCK", ++ 89: "SYS_EDESTADDRREQ", ++ 90: "SYS_EMSGSIZE", ++ 91: "SYS_EPROTOTYPE", ++ 92: "SYS_ENOPROTOOPT", ++ 93: "SYS_EPROTONOSUPPORT", ++ 94: "SYS_ESOCKTNOSUPPORT", ++ 95: "SYS_EOPNOTSUPP", ++ // Duplicate value: 95: "SYS_ENOTSUP", ++ 96: "SYS_EPFNOSUPPORT", ++ 97: "SYS_EAFNOSUPPORT", ++ 98: "SYS_EADDRINUSE", ++ 99: "SYS_EADDRNOTAVAIL", ++ 100: "SYS_ENETDOWN", ++ 101: "SYS_ENETUNREACH", ++ 102: "SYS_ENETRESET", ++ 103: "SYS_ECONNABORTED", ++ 104: "SYS_ECONNRESET", ++ 105: "SYS_ENOBUFS", ++ 106: "SYS_EISCONN", ++ 107: "SYS_ENOTCONN", ++ 108: "SYS_ESHUTDOWN", ++ 109: "SYS_ETOOMANYREFS", ++ 110: "SYS_ETIMEDOUT", ++ 111: "SYS_ECONNREFUSED", ++ 112: "SYS_EHOSTDOWN", ++ 113: "SYS_EHOSTUNREACH", ++ 114: "SYS_EALREADY", ++ 115: "SYS_EINPROGRESS", ++ 116: "SYS_ESTALE", ++ 117: "SYS_EUCLEAN", ++ 118: "SYS_ENOTNAM", ++ 119: "SYS_ENAVAIL", ++ 120: "SYS_EISNAM", ++ 121: "SYS_EREMOTEIO", ++ 122: "SYS_EDQUOT", ++ 123: "SYS_ENOMEDIUM", ++ 124: "SYS_EMEDIUMTYPE", ++ 125: "SYS_ECANCELED", ++ 126: "SYS_ENOKEY", ++ 127: "SYS_EKEYEXPIRED", ++ 128: "SYS_EKEYREVOKED", ++ 129: "SYS_EKEYREJECTED", ++ 130: "SYS_EOWNERDEAD", ++ 131: "SYS_ENOTRECOVERABLE", ++ 132: "SYS_ERFKILL", ++} ++var RemoteSocketServiceError_SystemError_value = map[string]int32{ ++ "SYS_SUCCESS": 0, ++ "SYS_EPERM": 1, ++ "SYS_ENOENT": 2, ++ "SYS_ESRCH": 3, ++ "SYS_EINTR": 4, ++ "SYS_EIO": 5, ++ "SYS_ENXIO": 6, ++ "SYS_E2BIG": 7, ++ "SYS_ENOEXEC": 8, ++ "SYS_EBADF": 9, ++ "SYS_ECHILD": 10, ++ "SYS_EAGAIN": 11, ++ "SYS_EWOULDBLOCK": 11, ++ "SYS_ENOMEM": 12, ++ "SYS_EACCES": 13, ++ "SYS_EFAULT": 14, ++ "SYS_ENOTBLK": 15, ++ "SYS_EBUSY": 16, ++ "SYS_EEXIST": 17, ++ "SYS_EXDEV": 18, ++ "SYS_ENODEV": 19, ++ "SYS_ENOTDIR": 20, ++ "SYS_EISDIR": 21, ++ "SYS_EINVAL": 22, ++ "SYS_ENFILE": 23, ++ "SYS_EMFILE": 24, ++ "SYS_ENOTTY": 25, ++ "SYS_ETXTBSY": 26, ++ "SYS_EFBIG": 27, ++ "SYS_ENOSPC": 28, ++ "SYS_ESPIPE": 29, ++ "SYS_EROFS": 30, ++ "SYS_EMLINK": 31, ++ "SYS_EPIPE": 32, ++ "SYS_EDOM": 33, ++ "SYS_ERANGE": 34, ++ "SYS_EDEADLK": 35, ++ "SYS_EDEADLOCK": 35, ++ "SYS_ENAMETOOLONG": 36, ++ "SYS_ENOLCK": 37, ++ "SYS_ENOSYS": 38, ++ "SYS_ENOTEMPTY": 39, ++ "SYS_ELOOP": 40, ++ "SYS_ENOMSG": 42, ++ "SYS_EIDRM": 43, ++ "SYS_ECHRNG": 44, ++ "SYS_EL2NSYNC": 45, ++ "SYS_EL3HLT": 46, ++ "SYS_EL3RST": 47, ++ "SYS_ELNRNG": 48, ++ "SYS_EUNATCH": 49, ++ "SYS_ENOCSI": 50, ++ "SYS_EL2HLT": 51, ++ "SYS_EBADE": 52, ++ "SYS_EBADR": 53, ++ "SYS_EXFULL": 54, ++ "SYS_ENOANO": 55, ++ "SYS_EBADRQC": 56, ++ "SYS_EBADSLT": 57, ++ "SYS_EBFONT": 59, ++ "SYS_ENOSTR": 60, ++ "SYS_ENODATA": 61, ++ "SYS_ETIME": 62, ++ "SYS_ENOSR": 63, ++ "SYS_ENONET": 64, ++ "SYS_ENOPKG": 65, ++ "SYS_EREMOTE": 66, ++ "SYS_ENOLINK": 67, ++ "SYS_EADV": 68, ++ "SYS_ESRMNT": 69, ++ "SYS_ECOMM": 70, ++ "SYS_EPROTO": 71, ++ 
"SYS_EMULTIHOP": 72, ++ "SYS_EDOTDOT": 73, ++ "SYS_EBADMSG": 74, ++ "SYS_EOVERFLOW": 75, ++ "SYS_ENOTUNIQ": 76, ++ "SYS_EBADFD": 77, ++ "SYS_EREMCHG": 78, ++ "SYS_ELIBACC": 79, ++ "SYS_ELIBBAD": 80, ++ "SYS_ELIBSCN": 81, ++ "SYS_ELIBMAX": 82, ++ "SYS_ELIBEXEC": 83, ++ "SYS_EILSEQ": 84, ++ "SYS_ERESTART": 85, ++ "SYS_ESTRPIPE": 86, ++ "SYS_EUSERS": 87, ++ "SYS_ENOTSOCK": 88, ++ "SYS_EDESTADDRREQ": 89, ++ "SYS_EMSGSIZE": 90, ++ "SYS_EPROTOTYPE": 91, ++ "SYS_ENOPROTOOPT": 92, ++ "SYS_EPROTONOSUPPORT": 93, ++ "SYS_ESOCKTNOSUPPORT": 94, ++ "SYS_EOPNOTSUPP": 95, ++ "SYS_ENOTSUP": 95, ++ "SYS_EPFNOSUPPORT": 96, ++ "SYS_EAFNOSUPPORT": 97, ++ "SYS_EADDRINUSE": 98, ++ "SYS_EADDRNOTAVAIL": 99, ++ "SYS_ENETDOWN": 100, ++ "SYS_ENETUNREACH": 101, ++ "SYS_ENETRESET": 102, ++ "SYS_ECONNABORTED": 103, ++ "SYS_ECONNRESET": 104, ++ "SYS_ENOBUFS": 105, ++ "SYS_EISCONN": 106, ++ "SYS_ENOTCONN": 107, ++ "SYS_ESHUTDOWN": 108, ++ "SYS_ETOOMANYREFS": 109, ++ "SYS_ETIMEDOUT": 110, ++ "SYS_ECONNREFUSED": 111, ++ "SYS_EHOSTDOWN": 112, ++ "SYS_EHOSTUNREACH": 113, ++ "SYS_EALREADY": 114, ++ "SYS_EINPROGRESS": 115, ++ "SYS_ESTALE": 116, ++ "SYS_EUCLEAN": 117, ++ "SYS_ENOTNAM": 118, ++ "SYS_ENAVAIL": 119, ++ "SYS_EISNAM": 120, ++ "SYS_EREMOTEIO": 121, ++ "SYS_EDQUOT": 122, ++ "SYS_ENOMEDIUM": 123, ++ "SYS_EMEDIUMTYPE": 124, ++ "SYS_ECANCELED": 125, ++ "SYS_ENOKEY": 126, ++ "SYS_EKEYEXPIRED": 127, ++ "SYS_EKEYREVOKED": 128, ++ "SYS_EKEYREJECTED": 129, ++ "SYS_EOWNERDEAD": 130, ++ "SYS_ENOTRECOVERABLE": 131, ++ "SYS_ERFKILL": 132, ++} ++ ++func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { ++ p := new(RemoteSocketServiceError_SystemError) ++ *p = x ++ return p ++} ++func (x RemoteSocketServiceError_SystemError) String() string { ++ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) ++} ++func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") ++ if err != nil { ++ return err ++ } ++ *x = RemoteSocketServiceError_SystemError(value) ++ return nil ++} ++func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} ++} ++ ++type CreateSocketRequest_SocketFamily int32 ++ ++const ( ++ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 ++ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 ++) ++ ++var CreateSocketRequest_SocketFamily_name = map[int32]string{ ++ 1: "IPv4", ++ 2: "IPv6", ++} ++var CreateSocketRequest_SocketFamily_value = map[string]int32{ ++ "IPv4": 1, ++ "IPv6": 2, ++} ++ ++func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { ++ p := new(CreateSocketRequest_SocketFamily) ++ *p = x ++ return p ++} ++func (x CreateSocketRequest_SocketFamily) String() string { ++ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) ++} ++func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") ++ if err != nil { ++ return err ++ } ++ *x = CreateSocketRequest_SocketFamily(value) ++ return nil ++} ++func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} ++} ++ ++type CreateSocketRequest_SocketProtocol int32 ++ ++const ( ++ 
CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 ++ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 ++) ++ ++var CreateSocketRequest_SocketProtocol_name = map[int32]string{ ++ 1: "TCP", ++ 2: "UDP", ++} ++var CreateSocketRequest_SocketProtocol_value = map[string]int32{ ++ "TCP": 1, ++ "UDP": 2, ++} ++ ++func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { ++ p := new(CreateSocketRequest_SocketProtocol) ++ *p = x ++ return p ++} ++func (x CreateSocketRequest_SocketProtocol) String() string { ++ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) ++} ++func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") ++ if err != nil { ++ return err ++ } ++ *x = CreateSocketRequest_SocketProtocol(value) ++ return nil ++} ++func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} ++} ++ ++type SocketOption_SocketOptionLevel int32 ++ ++const ( ++ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 ++ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 ++ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 ++ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 ++) ++ ++var SocketOption_SocketOptionLevel_name = map[int32]string{ ++ 0: "SOCKET_SOL_IP", ++ 1: "SOCKET_SOL_SOCKET", ++ 6: "SOCKET_SOL_TCP", ++ 17: "SOCKET_SOL_UDP", ++} ++var SocketOption_SocketOptionLevel_value = map[string]int32{ ++ "SOCKET_SOL_IP": 0, ++ "SOCKET_SOL_SOCKET": 1, ++ "SOCKET_SOL_TCP": 6, ++ "SOCKET_SOL_UDP": 17, ++} ++ ++func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { ++ p := new(SocketOption_SocketOptionLevel) ++ *p = x ++ return p ++} ++func (x SocketOption_SocketOptionLevel) String() string { ++ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) ++} ++func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") ++ if err != nil { ++ return err ++ } ++ *x = SocketOption_SocketOptionLevel(value) ++ return nil ++} ++func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} ++} ++ ++type SocketOption_SocketOptionName int32 ++ ++const ( ++ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 ++ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 ++ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 ++ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 ++ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 ++ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 ++ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 ++ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 ++ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 ++ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 ++ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 ++ 
SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 ++ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 ++ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 ++ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 ++ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 ++ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 ++ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 ++ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 ++ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 ++) ++ ++var SocketOption_SocketOptionName_name = map[int32]string{ ++ 1: "SOCKET_SO_DEBUG", ++ 2: "SOCKET_SO_REUSEADDR", ++ 3: "SOCKET_SO_TYPE", ++ 4: "SOCKET_SO_ERROR", ++ 5: "SOCKET_SO_DONTROUTE", ++ 6: "SOCKET_SO_BROADCAST", ++ 7: "SOCKET_SO_SNDBUF", ++ 8: "SOCKET_SO_RCVBUF", ++ 9: "SOCKET_SO_KEEPALIVE", ++ 10: "SOCKET_SO_OOBINLINE", ++ 13: "SOCKET_SO_LINGER", ++ 20: "SOCKET_SO_RCVTIMEO", ++ 21: "SOCKET_SO_SNDTIMEO", ++ // Duplicate value: 1: "SOCKET_IP_TOS", ++ // Duplicate value: 2: "SOCKET_IP_TTL", ++ // Duplicate value: 3: "SOCKET_IP_HDRINCL", ++ // Duplicate value: 4: "SOCKET_IP_OPTIONS", ++ // Duplicate value: 1: "SOCKET_TCP_NODELAY", ++ // Duplicate value: 2: "SOCKET_TCP_MAXSEG", ++ // Duplicate value: 3: "SOCKET_TCP_CORK", ++ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", ++ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", ++ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", ++ // Duplicate value: 7: "SOCKET_TCP_SYNCNT", ++ // Duplicate value: 8: "SOCKET_TCP_LINGER2", ++ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", ++ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", ++ 11: "SOCKET_TCP_INFO", ++ 12: "SOCKET_TCP_QUICKACK", ++} ++var SocketOption_SocketOptionName_value = map[string]int32{ ++ "SOCKET_SO_DEBUG": 1, ++ "SOCKET_SO_REUSEADDR": 2, ++ "SOCKET_SO_TYPE": 3, ++ "SOCKET_SO_ERROR": 4, ++ "SOCKET_SO_DONTROUTE": 5, ++ "SOCKET_SO_BROADCAST": 6, ++ "SOCKET_SO_SNDBUF": 7, ++ "SOCKET_SO_RCVBUF": 8, ++ "SOCKET_SO_KEEPALIVE": 9, ++ "SOCKET_SO_OOBINLINE": 10, ++ "SOCKET_SO_LINGER": 13, ++ "SOCKET_SO_RCVTIMEO": 20, ++ "SOCKET_SO_SNDTIMEO": 21, ++ "SOCKET_IP_TOS": 1, ++ "SOCKET_IP_TTL": 2, ++ "SOCKET_IP_HDRINCL": 3, ++ "SOCKET_IP_OPTIONS": 4, ++ "SOCKET_TCP_NODELAY": 1, ++ "SOCKET_TCP_MAXSEG": 2, ++ "SOCKET_TCP_CORK": 3, ++ "SOCKET_TCP_KEEPIDLE": 4, ++ "SOCKET_TCP_KEEPINTVL": 5, ++ "SOCKET_TCP_KEEPCNT": 6, ++ "SOCKET_TCP_SYNCNT": 7, ++ "SOCKET_TCP_LINGER2": 8, ++ "SOCKET_TCP_DEFER_ACCEPT": 9, ++ "SOCKET_TCP_WINDOW_CLAMP": 10, ++ "SOCKET_TCP_INFO": 11, ++ "SOCKET_TCP_QUICKACK": 12, ++} ++ ++func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { ++ p := new(SocketOption_SocketOptionName) ++ *p = x ++ return p ++} ++func (x SocketOption_SocketOptionName) String() string { ++ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) ++} ++func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") ++ if err != nil 
{ ++ return err ++ } ++ *x = SocketOption_SocketOptionName(value) ++ return nil ++} ++func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} ++} ++ ++type ShutDownRequest_How int32 ++ ++const ( ++ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 ++ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 ++ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 ++) ++ ++var ShutDownRequest_How_name = map[int32]string{ ++ 1: "SOCKET_SHUT_RD", ++ 2: "SOCKET_SHUT_WR", ++ 3: "SOCKET_SHUT_RDWR", ++} ++var ShutDownRequest_How_value = map[string]int32{ ++ "SOCKET_SHUT_RD": 1, ++ "SOCKET_SHUT_WR": 2, ++ "SOCKET_SHUT_RDWR": 3, ++} ++ ++func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { ++ p := new(ShutDownRequest_How) ++ *p = x ++ return p ++} ++func (x ShutDownRequest_How) String() string { ++ return proto.EnumName(ShutDownRequest_How_name, int32(x)) ++} ++func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") ++ if err != nil { ++ return err ++ } ++ *x = ShutDownRequest_How(value) ++ return nil ++} ++func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} ++} ++ ++type ReceiveRequest_Flags int32 ++ ++const ( ++ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 ++ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 ++) ++ ++var ReceiveRequest_Flags_name = map[int32]string{ ++ 1: "MSG_OOB", ++ 2: "MSG_PEEK", ++} ++var ReceiveRequest_Flags_value = map[string]int32{ ++ "MSG_OOB": 1, ++ "MSG_PEEK": 2, ++} ++ ++func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { ++ p := new(ReceiveRequest_Flags) ++ *p = x ++ return p ++} ++func (x ReceiveRequest_Flags) String() string { ++ return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) ++} ++func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") ++ if err != nil { ++ return err ++ } ++ *x = ReceiveRequest_Flags(value) ++ return nil ++} ++func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} ++} ++ ++type PollEvent_PollEventFlag int32 ++ ++const ( ++ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 ++ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 ++ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 ++ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 ++ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 ++ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 ++ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 ++ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 ++ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 ++ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 ++ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 ++ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 ++ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 ++ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 ++) ++ ++var PollEvent_PollEventFlag_name = map[int32]string{ ++ 0: "SOCKET_POLLNONE", ++ 1: "SOCKET_POLLIN", ++ 2: "SOCKET_POLLPRI", ++ 4: "SOCKET_POLLOUT", ++ 8: "SOCKET_POLLERR", ++ 16: "SOCKET_POLLHUP", ++ 32: "SOCKET_POLLNVAL", ++ 64: "SOCKET_POLLRDNORM", ++ 128: "SOCKET_POLLRDBAND", ++ 256: "SOCKET_POLLWRNORM", ++ 512: 
"SOCKET_POLLWRBAND", ++ 1024: "SOCKET_POLLMSG", ++ 4096: "SOCKET_POLLREMOVE", ++ 8192: "SOCKET_POLLRDHUP", ++} ++var PollEvent_PollEventFlag_value = map[string]int32{ ++ "SOCKET_POLLNONE": 0, ++ "SOCKET_POLLIN": 1, ++ "SOCKET_POLLPRI": 2, ++ "SOCKET_POLLOUT": 4, ++ "SOCKET_POLLERR": 8, ++ "SOCKET_POLLHUP": 16, ++ "SOCKET_POLLNVAL": 32, ++ "SOCKET_POLLRDNORM": 64, ++ "SOCKET_POLLRDBAND": 128, ++ "SOCKET_POLLWRNORM": 256, ++ "SOCKET_POLLWRBAND": 512, ++ "SOCKET_POLLMSG": 1024, ++ "SOCKET_POLLREMOVE": 4096, ++ "SOCKET_POLLRDHUP": 8192, ++} ++ ++func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { ++ p := new(PollEvent_PollEventFlag) ++ *p = x ++ return p ++} ++func (x PollEvent_PollEventFlag) String() string { ++ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) ++} ++func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") ++ if err != nil { ++ return err ++ } ++ *x = PollEvent_PollEventFlag(value) ++ return nil ++} ++func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} ++} ++ ++type ResolveReply_ErrorCode int32 ++ ++const ( ++ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 ++ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 ++ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 ++ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 ++ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 ++ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 ++ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 ++ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 ++ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 ++ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 ++ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 ++ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 ++ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 ++ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 ++ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 ++) ++ ++var ResolveReply_ErrorCode_name = map[int32]string{ ++ 1: "SOCKET_EAI_ADDRFAMILY", ++ 2: "SOCKET_EAI_AGAIN", ++ 3: "SOCKET_EAI_BADFLAGS", ++ 4: "SOCKET_EAI_FAIL", ++ 5: "SOCKET_EAI_FAMILY", ++ 6: "SOCKET_EAI_MEMORY", ++ 7: "SOCKET_EAI_NODATA", ++ 8: "SOCKET_EAI_NONAME", ++ 9: "SOCKET_EAI_SERVICE", ++ 10: "SOCKET_EAI_SOCKTYPE", ++ 11: "SOCKET_EAI_SYSTEM", ++ 12: "SOCKET_EAI_BADHINTS", ++ 13: "SOCKET_EAI_PROTOCOL", ++ 14: "SOCKET_EAI_OVERFLOW", ++ 15: "SOCKET_EAI_MAX", ++} ++var ResolveReply_ErrorCode_value = map[string]int32{ ++ "SOCKET_EAI_ADDRFAMILY": 1, ++ "SOCKET_EAI_AGAIN": 2, ++ "SOCKET_EAI_BADFLAGS": 3, ++ "SOCKET_EAI_FAIL": 4, ++ "SOCKET_EAI_FAMILY": 5, ++ "SOCKET_EAI_MEMORY": 6, ++ "SOCKET_EAI_NODATA": 7, ++ "SOCKET_EAI_NONAME": 8, ++ "SOCKET_EAI_SERVICE": 9, ++ "SOCKET_EAI_SOCKTYPE": 10, ++ "SOCKET_EAI_SYSTEM": 11, ++ "SOCKET_EAI_BADHINTS": 12, ++ "SOCKET_EAI_PROTOCOL": 13, ++ "SOCKET_EAI_OVERFLOW": 14, ++ "SOCKET_EAI_MAX": 15, ++} ++ ++func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { ++ p := new(ResolveReply_ErrorCode) ++ *p = x ++ return p ++} ++func (x ResolveReply_ErrorCode) String() string { ++ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) ++} ++func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, 
data, "ResolveReply_ErrorCode") ++ if err != nil { ++ return err ++ } ++ *x = ResolveReply_ErrorCode(value) ++ return nil ++} ++func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} ++} ++ ++type RemoteSocketServiceError struct { ++ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` ++ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } ++func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } ++func (*RemoteSocketServiceError) ProtoMessage() {} ++func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} ++} ++func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) ++} ++func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) ++} ++func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) ++} ++func (m *RemoteSocketServiceError) XXX_Size() int { ++ return xxx_messageInfo_RemoteSocketServiceError.Size(m) ++} ++func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { ++ xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo ++ ++const Default_RemoteSocketServiceError_SystemError int32 = 0 ++ ++func (m *RemoteSocketServiceError) GetSystemError() int32 { ++ if m != nil && m.SystemError != nil { ++ return *m.SystemError ++ } ++ return Default_RemoteSocketServiceError_SystemError ++} ++ ++func (m *RemoteSocketServiceError) GetErrorDetail() string { ++ if m != nil && m.ErrorDetail != nil { ++ return *m.ErrorDetail ++ } ++ return "" ++} ++ ++type AddressPort struct { ++ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` ++ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` ++ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AddressPort) Reset() { *m = AddressPort{} } ++func (m *AddressPort) String() string { return proto.CompactTextString(m) } ++func (*AddressPort) ProtoMessage() {} ++func (*AddressPort) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} ++} ++func (m *AddressPort) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AddressPort.Unmarshal(m, b) ++} ++func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) ++} ++func (dst *AddressPort) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AddressPort.Merge(dst, src) ++} ++func (m *AddressPort) XXX_Size() int { ++ return xxx_messageInfo_AddressPort.Size(m) ++} ++func (m *AddressPort) XXX_DiscardUnknown() { ++ xxx_messageInfo_AddressPort.DiscardUnknown(m) ++} ++ 
++var xxx_messageInfo_AddressPort proto.InternalMessageInfo ++ ++func (m *AddressPort) GetPort() int32 { ++ if m != nil && m.Port != nil { ++ return *m.Port ++ } ++ return 0 ++} ++ ++func (m *AddressPort) GetPackedAddress() []byte { ++ if m != nil { ++ return m.PackedAddress ++ } ++ return nil ++} ++ ++func (m *AddressPort) GetHostnameHint() string { ++ if m != nil && m.HostnameHint != nil { ++ return *m.HostnameHint ++ } ++ return "" ++} ++ ++type CreateSocketRequest struct { ++ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` ++ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` ++ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` ++ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` ++ AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` ++ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } ++func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } ++func (*CreateSocketRequest) ProtoMessage() {} ++func (*CreateSocketRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} ++} ++func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) ++} ++func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) ++} ++func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CreateSocketRequest.Merge(dst, src) ++} ++func (m *CreateSocketRequest) XXX_Size() int { ++ return xxx_messageInfo_CreateSocketRequest.Size(m) ++} ++func (m *CreateSocketRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo ++ ++const Default_CreateSocketRequest_ListenBacklog int32 = 0 ++ ++func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { ++ if m != nil && m.Family != nil { ++ return *m.Family ++ } ++ return CreateSocketRequest_IPv4 ++} ++ ++func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { ++ if m != nil && m.Protocol != nil { ++ return *m.Protocol ++ } ++ return CreateSocketRequest_TCP ++} ++ ++func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { ++ if m != nil { ++ return m.SocketOptions ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetListenBacklog() int32 { ++ if m != nil && m.ListenBacklog != nil { ++ return *m.ListenBacklog ++ } ++ return 
Default_CreateSocketRequest_ListenBacklog ++} ++ ++func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { ++ if m != nil { ++ return m.RemoteIp ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetAppId() string { ++ if m != nil && m.AppId != nil { ++ return *m.AppId ++ } ++ return "" ++} ++ ++func (m *CreateSocketRequest) GetProjectId() int64 { ++ if m != nil && m.ProjectId != nil { ++ return *m.ProjectId ++ } ++ return 0 ++} ++ ++type CreateSocketReply struct { ++ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ proto.XXX_InternalExtensions `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } ++func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } ++func (*CreateSocketReply) ProtoMessage() {} ++func (*CreateSocketReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} ++} ++ ++var extRange_CreateSocketReply = []proto.ExtensionRange{ ++ {Start: 1000, End: 536870911}, ++} ++ ++func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { ++ return extRange_CreateSocketReply ++} ++func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) ++} ++func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) ++} ++func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CreateSocketReply.Merge(dst, src) ++} ++func (m *CreateSocketReply) XXX_Size() int { ++ return xxx_messageInfo_CreateSocketReply.Size(m) ++} ++func (m *CreateSocketReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo ++ ++func (m *CreateSocketReply) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *CreateSocketReply) GetServerAddress() *AddressPort { ++ if m != nil { ++ return m.ServerAddress ++ } ++ return nil ++} ++ ++func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type BindRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *BindRequest) Reset() { *m = BindRequest{} } ++func (m *BindRequest) String() string { return proto.CompactTextString(m) } ++func (*BindRequest) ProtoMessage() {} ++func (*BindRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} ++} ++func (m *BindRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_BindRequest.Unmarshal(m, b) ++} 
++func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) ++} ++func (dst *BindRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BindRequest.Merge(dst, src) ++} ++func (m *BindRequest) XXX_Size() int { ++ return xxx_messageInfo_BindRequest.Size(m) ++} ++func (m *BindRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BindRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BindRequest proto.InternalMessageInfo ++ ++func (m *BindRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *BindRequest) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type BindReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *BindReply) Reset() { *m = BindReply{} } ++func (m *BindReply) String() string { return proto.CompactTextString(m) } ++func (*BindReply) ProtoMessage() {} ++func (*BindReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} ++} ++func (m *BindReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_BindReply.Unmarshal(m, b) ++} ++func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) ++} ++func (dst *BindReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BindReply.Merge(dst, src) ++} ++func (m *BindReply) XXX_Size() int { ++ return xxx_messageInfo_BindReply.Size(m) ++} ++func (m *BindReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_BindReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BindReply proto.InternalMessageInfo ++ ++func (m *BindReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type GetSocketNameRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } ++func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } ++func (*GetSocketNameRequest) ProtoMessage() {} ++func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} ++} ++func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) ++} ++func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) ++} ++func (m *GetSocketNameRequest) XXX_Size() int { ++ return xxx_messageInfo_GetSocketNameRequest.Size(m) ++} ++func (m *GetSocketNameRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo ++ ++func (m *GetSocketNameRequest) GetSocketDescriptor() string { ++ 
if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++type GetSocketNameReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } ++func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } ++func (*GetSocketNameReply) ProtoMessage() {} ++func (*GetSocketNameReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} ++} ++func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) ++} ++func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketNameReply.Merge(dst, src) ++} ++func (m *GetSocketNameReply) XXX_Size() int { ++ return xxx_messageInfo_GetSocketNameReply.Size(m) ++} ++func (m *GetSocketNameReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo ++ ++func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type GetPeerNameRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } ++func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } ++func (*GetPeerNameRequest) ProtoMessage() {} ++func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} ++} ++func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) ++} ++func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) ++} ++func (m *GetPeerNameRequest) XXX_Size() int { ++ return xxx_messageInfo_GetPeerNameRequest.Size(m) ++} ++func (m *GetPeerNameRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo ++ ++func (m *GetPeerNameRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++type GetPeerNameReply struct { ++ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } ++func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } ++func (*GetPeerNameReply) ProtoMessage() {} ++func (*GetPeerNameReply) 
Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} ++} ++func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) ++} ++func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) ++} ++func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetPeerNameReply.Merge(dst, src) ++} ++func (m *GetPeerNameReply) XXX_Size() int { ++ return xxx_messageInfo_GetPeerNameReply.Size(m) ++} ++func (m *GetPeerNameReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo ++ ++func (m *GetPeerNameReply) GetPeerIp() *AddressPort { ++ if m != nil { ++ return m.PeerIp ++ } ++ return nil ++} ++ ++type SocketOption struct { ++ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` ++ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` ++ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SocketOption) Reset() { *m = SocketOption{} } ++func (m *SocketOption) String() string { return proto.CompactTextString(m) } ++func (*SocketOption) ProtoMessage() {} ++func (*SocketOption) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} ++} ++func (m *SocketOption) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SocketOption.Unmarshal(m, b) ++} ++func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) ++} ++func (dst *SocketOption) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SocketOption.Merge(dst, src) ++} ++func (m *SocketOption) XXX_Size() int { ++ return xxx_messageInfo_SocketOption.Size(m) ++} ++func (m *SocketOption) XXX_DiscardUnknown() { ++ xxx_messageInfo_SocketOption.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SocketOption proto.InternalMessageInfo ++ ++func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { ++ if m != nil && m.Level != nil { ++ return *m.Level ++ } ++ return SocketOption_SOCKET_SOL_IP ++} ++ ++func (m *SocketOption) GetOption() SocketOption_SocketOptionName { ++ if m != nil && m.Option != nil { ++ return *m.Option ++ } ++ return SocketOption_SOCKET_SO_DEBUG ++} ++ ++func (m *SocketOption) GetValue() []byte { ++ if m != nil { ++ return m.Value ++ } ++ return nil ++} ++ ++type SetSocketOptionsRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } ++func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } ++func (*SetSocketOptionsRequest) ProtoMessage() {} ++func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { ++ return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{11} ++} ++func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) ++} ++func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) ++} ++func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) ++} ++func (m *SetSocketOptionsRequest) XXX_Size() int { ++ return xxx_messageInfo_SetSocketOptionsRequest.Size(m) ++} ++func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo ++ ++func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type SetSocketOptionsReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } ++func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } ++func (*SetSocketOptionsReply) ProtoMessage() {} ++func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} ++} ++func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) ++} ++func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) ++} ++func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) ++} ++func (m *SetSocketOptionsReply) XXX_Size() int { ++ return xxx_messageInfo_SetSocketOptionsReply.Size(m) ++} ++func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo ++ ++type GetSocketOptionsRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } ++func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } ++func (*GetSocketOptionsRequest) ProtoMessage() {} ++func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} ++} ++func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) ++} ++func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { ++ 
xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) ++} ++func (m *GetSocketOptionsRequest) XXX_Size() int { ++ return xxx_messageInfo_GetSocketOptionsRequest.Size(m) ++} ++func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo ++ ++func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type GetSocketOptionsReply struct { ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } ++func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } ++func (*GetSocketOptionsReply) ProtoMessage() {} ++func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} ++} ++func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) ++} ++func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) ++} ++func (m *GetSocketOptionsReply) XXX_Size() int { ++ return xxx_messageInfo_GetSocketOptionsReply.Size(m) ++} ++func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo ++ ++func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type ConnectRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } ++func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } ++func (*ConnectRequest) ProtoMessage() {} ++func (*ConnectRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} ++} ++func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) ++} ++func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) ++} ++func (dst *ConnectRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ConnectRequest.Merge(dst, src) ++} ++func (m *ConnectRequest) XXX_Size() int { ++ return xxx_messageInfo_ConnectRequest.Size(m) ++} ++func (m *ConnectRequest) XXX_DiscardUnknown() { ++ 
xxx_messageInfo_ConnectRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo ++ ++const Default_ConnectRequest_TimeoutSeconds float64 = -1 ++ ++func (m *ConnectRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ConnectRequest) GetRemoteIp() *AddressPort { ++ if m != nil { ++ return m.RemoteIp ++ } ++ return nil ++} ++ ++func (m *ConnectRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_ConnectRequest_TimeoutSeconds ++} ++ ++type ConnectReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ proto.XXX_InternalExtensions `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ConnectReply) Reset() { *m = ConnectReply{} } ++func (m *ConnectReply) String() string { return proto.CompactTextString(m) } ++func (*ConnectReply) ProtoMessage() {} ++func (*ConnectReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} ++} ++ ++var extRange_ConnectReply = []proto.ExtensionRange{ ++ {Start: 1000, End: 536870911}, ++} ++ ++func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { ++ return extRange_ConnectReply ++} ++func (m *ConnectReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ConnectReply.Unmarshal(m, b) ++} ++func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) ++} ++func (dst *ConnectReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ConnectReply.Merge(dst, src) ++} ++func (m *ConnectReply) XXX_Size() int { ++ return xxx_messageInfo_ConnectReply.Size(m) ++} ++func (m *ConnectReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ConnectReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ConnectReply proto.InternalMessageInfo ++ ++func (m *ConnectReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type ListenRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ListenRequest) Reset() { *m = ListenRequest{} } ++func (m *ListenRequest) String() string { return proto.CompactTextString(m) } ++func (*ListenRequest) ProtoMessage() {} ++func (*ListenRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} ++} ++func (m *ListenRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ListenRequest.Unmarshal(m, b) ++} ++func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) ++} ++func (dst *ListenRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ListenRequest.Merge(dst, src) ++} ++func (m *ListenRequest) XXX_Size() int { ++ return xxx_messageInfo_ListenRequest.Size(m) ++} ++func (m *ListenRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ListenRequest.DiscardUnknown(m) ++} ++ ++var 
xxx_messageInfo_ListenRequest proto.InternalMessageInfo ++ ++func (m *ListenRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ListenRequest) GetBacklog() int32 { ++ if m != nil && m.Backlog != nil { ++ return *m.Backlog ++ } ++ return 0 ++} ++ ++type ListenReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ListenReply) Reset() { *m = ListenReply{} } ++func (m *ListenReply) String() string { return proto.CompactTextString(m) } ++func (*ListenReply) ProtoMessage() {} ++func (*ListenReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} ++} ++func (m *ListenReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ListenReply.Unmarshal(m, b) ++} ++func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) ++} ++func (dst *ListenReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ListenReply.Merge(dst, src) ++} ++func (m *ListenReply) XXX_Size() int { ++ return xxx_messageInfo_ListenReply.Size(m) ++} ++func (m *ListenReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ListenReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ListenReply proto.InternalMessageInfo ++ ++type AcceptRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } ++func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } ++func (*AcceptRequest) ProtoMessage() {} ++func (*AcceptRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} ++} ++func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) ++} ++func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) ++} ++func (dst *AcceptRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AcceptRequest.Merge(dst, src) ++} ++func (m *AcceptRequest) XXX_Size() int { ++ return xxx_messageInfo_AcceptRequest.Size(m) ++} ++func (m *AcceptRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_AcceptRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo ++ ++const Default_AcceptRequest_TimeoutSeconds float64 = -1 ++ ++func (m *AcceptRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *AcceptRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_AcceptRequest_TimeoutSeconds ++} ++ ++type AcceptReply struct { ++ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` ++ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} 
`json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AcceptReply) Reset() { *m = AcceptReply{} } ++func (m *AcceptReply) String() string { return proto.CompactTextString(m) } ++func (*AcceptReply) ProtoMessage() {} ++func (*AcceptReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} ++} ++func (m *AcceptReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AcceptReply.Unmarshal(m, b) ++} ++func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) ++} ++func (dst *AcceptReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AcceptReply.Merge(dst, src) ++} ++func (m *AcceptReply) XXX_Size() int { ++ return xxx_messageInfo_AcceptReply.Size(m) ++} ++func (m *AcceptReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_AcceptReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_AcceptReply proto.InternalMessageInfo ++ ++func (m *AcceptReply) GetNewSocketDescriptor() []byte { ++ if m != nil { ++ return m.NewSocketDescriptor ++ } ++ return nil ++} ++ ++func (m *AcceptReply) GetRemoteAddress() *AddressPort { ++ if m != nil { ++ return m.RemoteAddress ++ } ++ return nil ++} ++ ++type ShutDownRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` ++ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } ++func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } ++func (*ShutDownRequest) ProtoMessage() {} ++func (*ShutDownRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} ++} ++func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) ++} ++func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) ++} ++func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ShutDownRequest.Merge(dst, src) ++} ++func (m *ShutDownRequest) XXX_Size() int { ++ return xxx_messageInfo_ShutDownRequest.Size(m) ++} ++func (m *ShutDownRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo ++ ++func (m *ShutDownRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ShutDownRequest) GetHow() ShutDownRequest_How { ++ if m != nil && m.How != nil { ++ return *m.How ++ } ++ return ShutDownRequest_SOCKET_SHUT_RD ++} ++ ++func (m *ShutDownRequest) GetSendOffset() int64 { ++ if m != nil && m.SendOffset != nil { ++ return *m.SendOffset ++ } ++ return 0 ++} ++ ++type ShutDownReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } ++func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } ++func 
(*ShutDownReply) ProtoMessage() {} ++func (*ShutDownReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} ++} ++func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) ++} ++func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) ++} ++func (dst *ShutDownReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ShutDownReply.Merge(dst, src) ++} ++func (m *ShutDownReply) XXX_Size() int { ++ return xxx_messageInfo_ShutDownReply.Size(m) ++} ++func (m *ShutDownReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ShutDownReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo ++ ++type CloseRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CloseRequest) Reset() { *m = CloseRequest{} } ++func (m *CloseRequest) String() string { return proto.CompactTextString(m) } ++func (*CloseRequest) ProtoMessage() {} ++func (*CloseRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} ++} ++func (m *CloseRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CloseRequest.Unmarshal(m, b) ++} ++func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) ++} ++func (dst *CloseRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CloseRequest.Merge(dst, src) ++} ++func (m *CloseRequest) XXX_Size() int { ++ return xxx_messageInfo_CloseRequest.Size(m) ++} ++func (m *CloseRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CloseRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CloseRequest proto.InternalMessageInfo ++ ++const Default_CloseRequest_SendOffset int64 = -1 ++ ++func (m *CloseRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *CloseRequest) GetSendOffset() int64 { ++ if m != nil && m.SendOffset != nil { ++ return *m.SendOffset ++ } ++ return Default_CloseRequest_SendOffset ++} ++ ++type CloseReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CloseReply) Reset() { *m = CloseReply{} } ++func (m *CloseReply) String() string { return proto.CompactTextString(m) } ++func (*CloseReply) ProtoMessage() {} ++func (*CloseReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} ++} ++func (m *CloseReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CloseReply.Unmarshal(m, b) ++} ++func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) ++} ++func (dst *CloseReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CloseReply.Merge(dst, src) ++} ++func (m *CloseReply) XXX_Size() int { ++ return xxx_messageInfo_CloseReply.Size(m) ++} ++func (m *CloseReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_CloseReply.DiscardUnknown(m) ++} ++ ++var 
xxx_messageInfo_CloseReply proto.InternalMessageInfo ++ ++type SendRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` ++ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` ++ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` ++ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SendRequest) Reset() { *m = SendRequest{} } ++func (m *SendRequest) String() string { return proto.CompactTextString(m) } ++func (*SendRequest) ProtoMessage() {} ++func (*SendRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} ++} ++func (m *SendRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SendRequest.Unmarshal(m, b) ++} ++func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) ++} ++func (dst *SendRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SendRequest.Merge(dst, src) ++} ++func (m *SendRequest) XXX_Size() int { ++ return xxx_messageInfo_SendRequest.Size(m) ++} ++func (m *SendRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SendRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SendRequest proto.InternalMessageInfo ++ ++const Default_SendRequest_Flags int32 = 0 ++const Default_SendRequest_TimeoutSeconds float64 = -1 ++ ++func (m *SendRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *SendRequest) GetData() []byte { ++ if m != nil { ++ return m.Data ++ } ++ return nil ++} ++ ++func (m *SendRequest) GetStreamOffset() int64 { ++ if m != nil && m.StreamOffset != nil { ++ return *m.StreamOffset ++ } ++ return 0 ++} ++ ++func (m *SendRequest) GetFlags() int32 { ++ if m != nil && m.Flags != nil { ++ return *m.Flags ++ } ++ return Default_SendRequest_Flags ++} ++ ++func (m *SendRequest) GetSendTo() *AddressPort { ++ if m != nil { ++ return m.SendTo ++ } ++ return nil ++} ++ ++func (m *SendRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_SendRequest_TimeoutSeconds ++} ++ ++type SendReply struct { ++ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SendReply) Reset() { *m = SendReply{} } ++func (m *SendReply) String() string { return proto.CompactTextString(m) } ++func (*SendReply) ProtoMessage() {} ++func (*SendReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} ++} ++func (m *SendReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SendReply.Unmarshal(m, b) ++} ++func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) ++} ++func (dst *SendReply) 
XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SendReply.Merge(dst, src) ++} ++func (m *SendReply) XXX_Size() int { ++ return xxx_messageInfo_SendReply.Size(m) ++} ++func (m *SendReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_SendReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SendReply proto.InternalMessageInfo ++ ++func (m *SendReply) GetDataSent() int32 { ++ if m != nil && m.DataSent != nil { ++ return *m.DataSent ++ } ++ return 0 ++} ++ ++type ReceiveRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` ++ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } ++func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } ++func (*ReceiveRequest) ProtoMessage() {} ++func (*ReceiveRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} ++} ++func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) ++} ++func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) ++} ++func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ReceiveRequest.Merge(dst, src) ++} ++func (m *ReceiveRequest) XXX_Size() int { ++ return xxx_messageInfo_ReceiveRequest.Size(m) ++} ++func (m *ReceiveRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo ++ ++const Default_ReceiveRequest_Flags int32 = 0 ++const Default_ReceiveRequest_TimeoutSeconds float64 = -1 ++ ++func (m *ReceiveRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ReceiveRequest) GetDataSize() int32 { ++ if m != nil && m.DataSize != nil { ++ return *m.DataSize ++ } ++ return 0 ++} ++ ++func (m *ReceiveRequest) GetFlags() int32 { ++ if m != nil && m.Flags != nil { ++ return *m.Flags ++ } ++ return Default_ReceiveRequest_Flags ++} ++ ++func (m *ReceiveRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_ReceiveRequest_TimeoutSeconds ++} ++ ++type ReceiveReply struct { ++ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` ++ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` ++ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` ++ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } ++func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } ++func (*ReceiveReply) ProtoMessage() {} ++func (*ReceiveReply) 
Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} ++} ++func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) ++} ++func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) ++} ++func (dst *ReceiveReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ReceiveReply.Merge(dst, src) ++} ++func (m *ReceiveReply) XXX_Size() int { ++ return xxx_messageInfo_ReceiveReply.Size(m) ++} ++func (m *ReceiveReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ReceiveReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo ++ ++func (m *ReceiveReply) GetStreamOffset() int64 { ++ if m != nil && m.StreamOffset != nil { ++ return *m.StreamOffset ++ } ++ return 0 ++} ++ ++func (m *ReceiveReply) GetData() []byte { ++ if m != nil { ++ return m.Data ++ } ++ return nil ++} ++ ++func (m *ReceiveReply) GetReceivedFrom() *AddressPort { ++ if m != nil { ++ return m.ReceivedFrom ++ } ++ return nil ++} ++ ++func (m *ReceiveReply) GetBufferSize() int32 { ++ if m != nil && m.BufferSize != nil { ++ return *m.BufferSize ++ } ++ return 0 ++} ++ ++type PollEvent struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` ++ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollEvent) Reset() { *m = PollEvent{} } ++func (m *PollEvent) String() string { return proto.CompactTextString(m) } ++func (*PollEvent) ProtoMessage() {} ++func (*PollEvent) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} ++} ++func (m *PollEvent) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollEvent.Unmarshal(m, b) ++} ++func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) ++} ++func (dst *PollEvent) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollEvent.Merge(dst, src) ++} ++func (m *PollEvent) XXX_Size() int { ++ return xxx_messageInfo_PollEvent.Size(m) ++} ++func (m *PollEvent) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollEvent.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollEvent proto.InternalMessageInfo ++ ++func (m *PollEvent) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *PollEvent) GetRequestedEvents() int32 { ++ if m != nil && m.RequestedEvents != nil { ++ return *m.RequestedEvents ++ } ++ return 0 ++} ++ ++func (m *PollEvent) GetObservedEvents() int32 { ++ if m != nil && m.ObservedEvents != nil { ++ return *m.ObservedEvents ++ } ++ return 0 ++} ++ ++type PollRequest struct { ++ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollRequest) Reset() { *m = 
PollRequest{} } ++func (m *PollRequest) String() string { return proto.CompactTextString(m) } ++func (*PollRequest) ProtoMessage() {} ++func (*PollRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} ++} ++func (m *PollRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollRequest.Unmarshal(m, b) ++} ++func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) ++} ++func (dst *PollRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollRequest.Merge(dst, src) ++} ++func (m *PollRequest) XXX_Size() int { ++ return xxx_messageInfo_PollRequest.Size(m) ++} ++func (m *PollRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollRequest proto.InternalMessageInfo ++ ++const Default_PollRequest_TimeoutSeconds float64 = -1 ++ ++func (m *PollRequest) GetEvents() []*PollEvent { ++ if m != nil { ++ return m.Events ++ } ++ return nil ++} ++ ++func (m *PollRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_PollRequest_TimeoutSeconds ++} ++ ++type PollReply struct { ++ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollReply) Reset() { *m = PollReply{} } ++func (m *PollReply) String() string { return proto.CompactTextString(m) } ++func (*PollReply) ProtoMessage() {} ++func (*PollReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} ++} ++func (m *PollReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollReply.Unmarshal(m, b) ++} ++func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) ++} ++func (dst *PollReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollReply.Merge(dst, src) ++} ++func (m *PollReply) XXX_Size() int { ++ return xxx_messageInfo_PollReply.Size(m) ++} ++func (m *PollReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollReply proto.InternalMessageInfo ++ ++func (m *PollReply) GetEvents() []*PollEvent { ++ if m != nil { ++ return m.Events ++ } ++ return nil ++} ++ ++type ResolveRequest struct { ++ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` ++ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } ++func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } ++func (*ResolveRequest) ProtoMessage() {} ++func (*ResolveRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} ++} ++func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) ++} ++func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) ++} ++func (dst *ResolveRequest) 
XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ResolveRequest.Merge(dst, src) ++} ++func (m *ResolveRequest) XXX_Size() int { ++ return xxx_messageInfo_ResolveRequest.Size(m) ++} ++func (m *ResolveRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ResolveRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo ++ ++func (m *ResolveRequest) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { ++ if m != nil { ++ return m.AddressFamilies ++ } ++ return nil ++} ++ ++type ResolveReply struct { ++ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` ++ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` ++ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ResolveReply) Reset() { *m = ResolveReply{} } ++func (m *ResolveReply) String() string { return proto.CompactTextString(m) } ++func (*ResolveReply) ProtoMessage() {} ++func (*ResolveReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} ++} ++func (m *ResolveReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ResolveReply.Unmarshal(m, b) ++} ++func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) ++} ++func (dst *ResolveReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ResolveReply.Merge(dst, src) ++} ++func (m *ResolveReply) XXX_Size() int { ++ return xxx_messageInfo_ResolveReply.Size(m) ++} ++func (m *ResolveReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ResolveReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ResolveReply proto.InternalMessageInfo ++ ++func (m *ResolveReply) GetPackedAddress() [][]byte { ++ if m != nil { ++ return m.PackedAddress ++ } ++ return nil ++} ++ ++func (m *ResolveReply) GetCanonicalName() string { ++ if m != nil && m.CanonicalName != nil { ++ return *m.CanonicalName ++ } ++ return "" ++} ++ ++func (m *ResolveReply) GetAliases() []string { ++ if m != nil { ++ return m.Aliases ++ } ++ return nil ++} ++ ++func init() { ++ proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") ++ proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") ++ proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") ++ proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") ++ proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") ++ proto.RegisterType((*BindReply)(nil), "appengine.BindReply") ++ proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") ++ proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") ++ proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") ++ proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") ++ proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") ++ proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") ++ proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") ++ proto.RegisterType((*GetSocketOptionsRequest)(nil), 
"appengine.GetSocketOptionsRequest") ++ proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") ++ proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") ++ proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") ++ proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") ++ proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") ++ proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") ++ proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") ++ proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") ++ proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") ++ proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") ++ proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") ++ proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") ++ proto.RegisterType((*SendReply)(nil), "appengine.SendReply") ++ proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") ++ proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") ++ proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") ++ proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") ++ proto.RegisterType((*PollReply)(nil), "appengine.PollReply") ++ proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") ++ proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") ++} ++ ++func init() { ++ proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) ++} ++ ++var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ ++ // 3088 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, ++ 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, ++ 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, ++ 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, ++ 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, ++ 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, ++ 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, ++ 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, ++ 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, ++ 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, ++ 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, ++ 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, ++ 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, ++ 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, ++ 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, ++ 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, ++ 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, ++ 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, ++ 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 
0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, ++ 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, ++ 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, ++ 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, ++ 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, ++ 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, ++ 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, ++ 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, ++ 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, ++ 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, ++ 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, ++ 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, ++ 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, ++ 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, ++ 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, ++ 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, ++ 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, ++ 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, ++ 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, ++ 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, ++ 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, ++ 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, ++ 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, ++ 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, ++ 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, ++ 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, ++ 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, ++ 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, ++ 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, ++ 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, ++ 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, ++ 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, ++ 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, ++ 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, ++ 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, ++ 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, ++ 0x98, 0x13, 0xc2, 0xb1, 
0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, ++ 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, ++ 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, ++ 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, ++ 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, ++ 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, ++ 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, ++ 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, ++ 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, ++ 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, ++ 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, ++ 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, ++ 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, ++ 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, ++ 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, ++ 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, ++ 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, ++ 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, ++ 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, ++ 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, ++ 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, ++ 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, ++ 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, ++ 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, ++ 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, ++ 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, ++ 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, ++ 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, ++ 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, ++ 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, ++ 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, ++ 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, ++ 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, ++ 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, ++ 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, ++ 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, ++ 0xed, 0x7c, 
0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, ++ 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, ++ 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, ++ 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, ++ 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, ++ 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, ++ 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, ++ 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, ++ 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, ++ 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, ++ 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, ++ 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, ++ 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, ++ 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, ++ 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, ++ 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, ++ 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, ++ 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, ++ 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, ++ 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, ++ 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, ++ 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, ++ 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, ++ 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, ++ 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, ++ 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, ++ 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, ++ 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, ++ 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, ++ 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, ++ 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, ++ 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, ++ 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, ++ 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, ++ 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, ++ 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, ++ 
0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, ++ 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, ++ 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, ++ 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, ++ 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, ++ 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, ++ 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, ++ 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, ++ 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, ++ 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, ++ 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, ++ 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, ++ 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, ++ 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, ++ 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, ++ 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, ++ 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, ++ 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, ++ 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, ++ 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, ++ 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, ++ 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, ++ 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, ++ 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, ++ 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, ++ 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, ++ 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, ++ 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, ++ 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, ++ 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, ++ 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, ++ 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, ++ 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, ++ 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, ++ 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, ++ 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 
0x86, ++ 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, ++ 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, ++ 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, ++ 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, ++ 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, ++ 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, ++ 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, ++ 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, ++ 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, ++ 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, ++ 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, ++ 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, ++ 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, ++ 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, ++ 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, ++ 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, ++ 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, ++ 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, ++ 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, ++ 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, ++ 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, ++ 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, ++ 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, ++ 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, ++ 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, ++ 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, ++ 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, ++ 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, ++ 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, ++ 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, ++ 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, ++} +diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +new file mode 100644 +index 00000000000..2fcc7953dc0 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +@@ -0,0 +1,460 @@ ++syntax = "proto2"; ++option go_package = "socket"; ++ ++package appengine; ++ ++message RemoteSocketServiceError { ++ enum ErrorCode { ++ 
SYSTEM_ERROR = 1; ++ GAI_ERROR = 2; ++ FAILURE = 4; ++ PERMISSION_DENIED = 5; ++ INVALID_REQUEST = 6; ++ SOCKET_CLOSED = 7; ++ } ++ ++ enum SystemError { ++ option allow_alias = true; ++ ++ SYS_SUCCESS = 0; ++ SYS_EPERM = 1; ++ SYS_ENOENT = 2; ++ SYS_ESRCH = 3; ++ SYS_EINTR = 4; ++ SYS_EIO = 5; ++ SYS_ENXIO = 6; ++ SYS_E2BIG = 7; ++ SYS_ENOEXEC = 8; ++ SYS_EBADF = 9; ++ SYS_ECHILD = 10; ++ SYS_EAGAIN = 11; ++ SYS_EWOULDBLOCK = 11; ++ SYS_ENOMEM = 12; ++ SYS_EACCES = 13; ++ SYS_EFAULT = 14; ++ SYS_ENOTBLK = 15; ++ SYS_EBUSY = 16; ++ SYS_EEXIST = 17; ++ SYS_EXDEV = 18; ++ SYS_ENODEV = 19; ++ SYS_ENOTDIR = 20; ++ SYS_EISDIR = 21; ++ SYS_EINVAL = 22; ++ SYS_ENFILE = 23; ++ SYS_EMFILE = 24; ++ SYS_ENOTTY = 25; ++ SYS_ETXTBSY = 26; ++ SYS_EFBIG = 27; ++ SYS_ENOSPC = 28; ++ SYS_ESPIPE = 29; ++ SYS_EROFS = 30; ++ SYS_EMLINK = 31; ++ SYS_EPIPE = 32; ++ SYS_EDOM = 33; ++ SYS_ERANGE = 34; ++ SYS_EDEADLK = 35; ++ SYS_EDEADLOCK = 35; ++ SYS_ENAMETOOLONG = 36; ++ SYS_ENOLCK = 37; ++ SYS_ENOSYS = 38; ++ SYS_ENOTEMPTY = 39; ++ SYS_ELOOP = 40; ++ SYS_ENOMSG = 42; ++ SYS_EIDRM = 43; ++ SYS_ECHRNG = 44; ++ SYS_EL2NSYNC = 45; ++ SYS_EL3HLT = 46; ++ SYS_EL3RST = 47; ++ SYS_ELNRNG = 48; ++ SYS_EUNATCH = 49; ++ SYS_ENOCSI = 50; ++ SYS_EL2HLT = 51; ++ SYS_EBADE = 52; ++ SYS_EBADR = 53; ++ SYS_EXFULL = 54; ++ SYS_ENOANO = 55; ++ SYS_EBADRQC = 56; ++ SYS_EBADSLT = 57; ++ SYS_EBFONT = 59; ++ SYS_ENOSTR = 60; ++ SYS_ENODATA = 61; ++ SYS_ETIME = 62; ++ SYS_ENOSR = 63; ++ SYS_ENONET = 64; ++ SYS_ENOPKG = 65; ++ SYS_EREMOTE = 66; ++ SYS_ENOLINK = 67; ++ SYS_EADV = 68; ++ SYS_ESRMNT = 69; ++ SYS_ECOMM = 70; ++ SYS_EPROTO = 71; ++ SYS_EMULTIHOP = 72; ++ SYS_EDOTDOT = 73; ++ SYS_EBADMSG = 74; ++ SYS_EOVERFLOW = 75; ++ SYS_ENOTUNIQ = 76; ++ SYS_EBADFD = 77; ++ SYS_EREMCHG = 78; ++ SYS_ELIBACC = 79; ++ SYS_ELIBBAD = 80; ++ SYS_ELIBSCN = 81; ++ SYS_ELIBMAX = 82; ++ SYS_ELIBEXEC = 83; ++ SYS_EILSEQ = 84; ++ SYS_ERESTART = 85; ++ SYS_ESTRPIPE = 86; ++ SYS_EUSERS = 87; ++ SYS_ENOTSOCK = 88; ++ SYS_EDESTADDRREQ = 89; ++ SYS_EMSGSIZE = 90; ++ SYS_EPROTOTYPE = 91; ++ SYS_ENOPROTOOPT = 92; ++ SYS_EPROTONOSUPPORT = 93; ++ SYS_ESOCKTNOSUPPORT = 94; ++ SYS_EOPNOTSUPP = 95; ++ SYS_ENOTSUP = 95; ++ SYS_EPFNOSUPPORT = 96; ++ SYS_EAFNOSUPPORT = 97; ++ SYS_EADDRINUSE = 98; ++ SYS_EADDRNOTAVAIL = 99; ++ SYS_ENETDOWN = 100; ++ SYS_ENETUNREACH = 101; ++ SYS_ENETRESET = 102; ++ SYS_ECONNABORTED = 103; ++ SYS_ECONNRESET = 104; ++ SYS_ENOBUFS = 105; ++ SYS_EISCONN = 106; ++ SYS_ENOTCONN = 107; ++ SYS_ESHUTDOWN = 108; ++ SYS_ETOOMANYREFS = 109; ++ SYS_ETIMEDOUT = 110; ++ SYS_ECONNREFUSED = 111; ++ SYS_EHOSTDOWN = 112; ++ SYS_EHOSTUNREACH = 113; ++ SYS_EALREADY = 114; ++ SYS_EINPROGRESS = 115; ++ SYS_ESTALE = 116; ++ SYS_EUCLEAN = 117; ++ SYS_ENOTNAM = 118; ++ SYS_ENAVAIL = 119; ++ SYS_EISNAM = 120; ++ SYS_EREMOTEIO = 121; ++ SYS_EDQUOT = 122; ++ SYS_ENOMEDIUM = 123; ++ SYS_EMEDIUMTYPE = 124; ++ SYS_ECANCELED = 125; ++ SYS_ENOKEY = 126; ++ SYS_EKEYEXPIRED = 127; ++ SYS_EKEYREVOKED = 128; ++ SYS_EKEYREJECTED = 129; ++ SYS_EOWNERDEAD = 130; ++ SYS_ENOTRECOVERABLE = 131; ++ SYS_ERFKILL = 132; ++ } ++ ++ optional int32 system_error = 1 [default=0]; ++ optional string error_detail = 2; ++} ++ ++message AddressPort { ++ required int32 port = 1; ++ optional bytes packed_address = 2; ++ ++ optional string hostname_hint = 3; ++} ++ ++ ++ ++message CreateSocketRequest { ++ enum SocketFamily { ++ IPv4 = 1; ++ IPv6 = 2; ++ } ++ ++ enum SocketProtocol { ++ TCP = 1; ++ UDP = 2; ++ } ++ ++ required SocketFamily family = 1; ++ required SocketProtocol protocol = 2; 
++ ++ repeated SocketOption socket_options = 3; ++ ++ optional AddressPort proxy_external_ip = 4; ++ ++ optional int32 listen_backlog = 5 [default=0]; ++ ++ optional AddressPort remote_ip = 6; ++ ++ optional string app_id = 9; ++ ++ optional int64 project_id = 10; ++} ++ ++message CreateSocketReply { ++ optional string socket_descriptor = 1; ++ ++ optional AddressPort server_address = 3; ++ ++ optional AddressPort proxy_external_ip = 4; ++ ++ extensions 1000 to max; ++} ++ ++ ++ ++message BindRequest { ++ required string socket_descriptor = 1; ++ required AddressPort proxy_external_ip = 2; ++} ++ ++message BindReply { ++ optional AddressPort proxy_external_ip = 1; ++} ++ ++ ++ ++message GetSocketNameRequest { ++ required string socket_descriptor = 1; ++} ++ ++message GetSocketNameReply { ++ optional AddressPort proxy_external_ip = 2; ++} ++ ++ ++ ++message GetPeerNameRequest { ++ required string socket_descriptor = 1; ++} ++ ++message GetPeerNameReply { ++ optional AddressPort peer_ip = 2; ++} ++ ++ ++message SocketOption { ++ ++ enum SocketOptionLevel { ++ SOCKET_SOL_IP = 0; ++ SOCKET_SOL_SOCKET = 1; ++ SOCKET_SOL_TCP = 6; ++ SOCKET_SOL_UDP = 17; ++ } ++ ++ enum SocketOptionName { ++ option allow_alias = true; ++ ++ SOCKET_SO_DEBUG = 1; ++ SOCKET_SO_REUSEADDR = 2; ++ SOCKET_SO_TYPE = 3; ++ SOCKET_SO_ERROR = 4; ++ SOCKET_SO_DONTROUTE = 5; ++ SOCKET_SO_BROADCAST = 6; ++ SOCKET_SO_SNDBUF = 7; ++ SOCKET_SO_RCVBUF = 8; ++ SOCKET_SO_KEEPALIVE = 9; ++ SOCKET_SO_OOBINLINE = 10; ++ SOCKET_SO_LINGER = 13; ++ SOCKET_SO_RCVTIMEO = 20; ++ SOCKET_SO_SNDTIMEO = 21; ++ ++ SOCKET_IP_TOS = 1; ++ SOCKET_IP_TTL = 2; ++ SOCKET_IP_HDRINCL = 3; ++ SOCKET_IP_OPTIONS = 4; ++ ++ SOCKET_TCP_NODELAY = 1; ++ SOCKET_TCP_MAXSEG = 2; ++ SOCKET_TCP_CORK = 3; ++ SOCKET_TCP_KEEPIDLE = 4; ++ SOCKET_TCP_KEEPINTVL = 5; ++ SOCKET_TCP_KEEPCNT = 6; ++ SOCKET_TCP_SYNCNT = 7; ++ SOCKET_TCP_LINGER2 = 8; ++ SOCKET_TCP_DEFER_ACCEPT = 9; ++ SOCKET_TCP_WINDOW_CLAMP = 10; ++ SOCKET_TCP_INFO = 11; ++ SOCKET_TCP_QUICKACK = 12; ++ } ++ ++ required SocketOptionLevel level = 1; ++ required SocketOptionName option = 2; ++ required bytes value = 3; ++} ++ ++ ++message SetSocketOptionsRequest { ++ required string socket_descriptor = 1; ++ repeated SocketOption options = 2; ++} ++ ++message SetSocketOptionsReply { ++} ++ ++message GetSocketOptionsRequest { ++ required string socket_descriptor = 1; ++ repeated SocketOption options = 2; ++} ++ ++message GetSocketOptionsReply { ++ repeated SocketOption options = 2; ++} ++ ++ ++message ConnectRequest { ++ required string socket_descriptor = 1; ++ required AddressPort remote_ip = 2; ++ optional double timeout_seconds = 3 [default=-1]; ++} ++ ++message ConnectReply { ++ optional AddressPort proxy_external_ip = 1; ++ ++ extensions 1000 to max; ++} ++ ++ ++message ListenRequest { ++ required string socket_descriptor = 1; ++ required int32 backlog = 2; ++} ++ ++message ListenReply { ++} ++ ++ ++message AcceptRequest { ++ required string socket_descriptor = 1; ++ optional double timeout_seconds = 2 [default=-1]; ++} ++ ++message AcceptReply { ++ optional bytes new_socket_descriptor = 2; ++ optional AddressPort remote_address = 3; ++} ++ ++ ++ ++message ShutDownRequest { ++ enum How { ++ SOCKET_SHUT_RD = 1; ++ SOCKET_SHUT_WR = 2; ++ SOCKET_SHUT_RDWR = 3; ++ } ++ required string socket_descriptor = 1; ++ required How how = 2; ++ required int64 send_offset = 3; ++} ++ ++message ShutDownReply { ++} ++ ++ ++ ++message CloseRequest { ++ required string socket_descriptor = 1; ++ optional int64 send_offset = 2 
[default=-1]; ++} ++ ++message CloseReply { ++} ++ ++ ++ ++message SendRequest { ++ required string socket_descriptor = 1; ++ required bytes data = 2 [ctype=CORD]; ++ required int64 stream_offset = 3; ++ optional int32 flags = 4 [default=0]; ++ optional AddressPort send_to = 5; ++ optional double timeout_seconds = 6 [default=-1]; ++} ++ ++message SendReply { ++ optional int32 data_sent = 1; ++} ++ ++ ++message ReceiveRequest { ++ enum Flags { ++ MSG_OOB = 1; ++ MSG_PEEK = 2; ++ } ++ required string socket_descriptor = 1; ++ required int32 data_size = 2; ++ optional int32 flags = 3 [default=0]; ++ optional double timeout_seconds = 5 [default=-1]; ++} ++ ++message ReceiveReply { ++ optional int64 stream_offset = 2; ++ optional bytes data = 3 [ctype=CORD]; ++ optional AddressPort received_from = 4; ++ optional int32 buffer_size = 5; ++} ++ ++ ++ ++message PollEvent { ++ ++ enum PollEventFlag { ++ SOCKET_POLLNONE = 0; ++ SOCKET_POLLIN = 1; ++ SOCKET_POLLPRI = 2; ++ SOCKET_POLLOUT = 4; ++ SOCKET_POLLERR = 8; ++ SOCKET_POLLHUP = 16; ++ SOCKET_POLLNVAL = 32; ++ SOCKET_POLLRDNORM = 64; ++ SOCKET_POLLRDBAND = 128; ++ SOCKET_POLLWRNORM = 256; ++ SOCKET_POLLWRBAND = 512; ++ SOCKET_POLLMSG = 1024; ++ SOCKET_POLLREMOVE = 4096; ++ SOCKET_POLLRDHUP = 8192; ++ }; ++ ++ required string socket_descriptor = 1; ++ required int32 requested_events = 2; ++ required int32 observed_events = 3; ++} ++ ++message PollRequest { ++ repeated PollEvent events = 1; ++ optional double timeout_seconds = 2 [default=-1]; ++} ++ ++message PollReply { ++ repeated PollEvent events = 2; ++} ++ ++message ResolveRequest { ++ required string name = 1; ++ repeated CreateSocketRequest.SocketFamily address_families = 2; ++} ++ ++message ResolveReply { ++ enum ErrorCode { ++ SOCKET_EAI_ADDRFAMILY = 1; ++ SOCKET_EAI_AGAIN = 2; ++ SOCKET_EAI_BADFLAGS = 3; ++ SOCKET_EAI_FAIL = 4; ++ SOCKET_EAI_FAMILY = 5; ++ SOCKET_EAI_MEMORY = 6; ++ SOCKET_EAI_NODATA = 7; ++ SOCKET_EAI_NONAME = 8; ++ SOCKET_EAI_SERVICE = 9; ++ SOCKET_EAI_SOCKTYPE = 10; ++ SOCKET_EAI_SYSTEM = 11; ++ SOCKET_EAI_BADHINTS = 12; ++ SOCKET_EAI_PROTOCOL = 13; ++ SOCKET_EAI_OVERFLOW = 14; ++ SOCKET_EAI_MAX = 15; ++ }; ++ ++ repeated bytes packed_address = 2; ++ optional string canonical_name = 3; ++ repeated string aliases = 4; ++} +diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go +new file mode 100644 +index 00000000000..3de46df826b +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/doc.go +@@ -0,0 +1,10 @@ ++// Copyright 2012 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. ++ ++// Package socket provides outbound network sockets. ++// ++// This package is only required in the classic App Engine environment. ++// Applications running only in App Engine "flexible environment" should ++// use the standard library's net package. ++package socket +diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go +new file mode 100644 +index 00000000000..0ad50e2d36d +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/socket_classic.go +@@ -0,0 +1,290 @@ ++// Copyright 2012 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. 
++ ++// +build appengine ++ ++package socket ++ ++import ( ++ "fmt" ++ "io" ++ "net" ++ "strconv" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "golang.org/x/net/context" ++ "google.golang.org/appengine/internal" ++ ++ pb "google.golang.org/appengine/internal/socket" ++) ++ ++// Dial connects to the address addr on the network protocol. ++// The address format is host:port, where host may be a hostname or an IP address. ++// Known protocols are "tcp" and "udp". ++// The returned connection satisfies net.Conn, and is valid while ctx is valid; ++// if the connection is to be used after ctx becomes invalid, invoke SetContext ++// with the new context. ++func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { ++ return DialTimeout(ctx, protocol, addr, 0) ++} ++ ++var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ ++ pb.CreateSocketRequest_IPv4, ++ pb.CreateSocketRequest_IPv6, ++} ++ ++// DialTimeout is like Dial but takes a timeout. ++// The timeout includes name resolution, if required. ++func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { ++ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. ++ if timeout > 0 { ++ var cancel context.CancelFunc ++ dialCtx, cancel = context.WithTimeout(ctx, timeout) ++ defer cancel() ++ } ++ ++ host, portStr, err := net.SplitHostPort(addr) ++ if err != nil { ++ return nil, err ++ } ++ port, err := strconv.Atoi(portStr) ++ if err != nil { ++ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) ++ } ++ ++ var prot pb.CreateSocketRequest_SocketProtocol ++ switch protocol { ++ case "tcp": ++ prot = pb.CreateSocketRequest_TCP ++ case "udp": ++ prot = pb.CreateSocketRequest_UDP ++ default: ++ return nil, fmt.Errorf("socket: unknown protocol %q", protocol) ++ } ++ ++ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) ++ if err != nil { ++ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) ++ } ++ if len(packedAddrs) == 0 { ++ return nil, fmt.Errorf("no addresses for %q", host) ++ } ++ ++ packedAddr := packedAddrs[0] // use first address ++ fam := pb.CreateSocketRequest_IPv4 ++ if len(packedAddr) == net.IPv6len { ++ fam = pb.CreateSocketRequest_IPv6 ++ } ++ ++ req := &pb.CreateSocketRequest{ ++ Family: fam.Enum(), ++ Protocol: prot.Enum(), ++ RemoteIp: &pb.AddressPort{ ++ Port: proto.Int32(int32(port)), ++ PackedAddress: packedAddr, ++ }, ++ } ++ if resolved { ++ req.RemoteIp.HostnameHint = &host ++ } ++ res := &pb.CreateSocketReply{} ++ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { ++ return nil, err ++ } ++ ++ return &Conn{ ++ ctx: ctx, ++ desc: res.GetSocketDescriptor(), ++ prot: prot, ++ local: res.ProxyExternalIp, ++ remote: req.RemoteIp, ++ }, nil ++} ++ ++// LookupIP returns the given host's IP addresses. ++func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { ++ packedAddrs, _, err := resolve(ctx, ipFamilies, host) ++ if err != nil { ++ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) ++ } ++ addrs = make([]net.IP, len(packedAddrs)) ++ for i, pa := range packedAddrs { ++ addrs[i] = net.IP(pa) ++ } ++ return addrs, nil ++} ++ ++func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { ++ // Check if it's an IP address. 
++ if ip := net.ParseIP(host); ip != nil { ++ if ip := ip.To4(); ip != nil { ++ return [][]byte{ip}, false, nil ++ } ++ return [][]byte{ip}, false, nil ++ } ++ ++ req := &pb.ResolveRequest{ ++ Name: &host, ++ AddressFamilies: fams, ++ } ++ res := &pb.ResolveReply{} ++ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { ++ // XXX: need to map to pb.ResolveReply_ErrorCode? ++ return nil, false, err ++ } ++ return res.PackedAddress, true, nil ++} ++ ++// withDeadline is like context.WithDeadline, except it ignores the zero deadline. ++func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { ++ if deadline.IsZero() { ++ return parent, func() {} ++ } ++ return context.WithDeadline(parent, deadline) ++} ++ ++// Conn represents a socket connection. ++// It implements net.Conn. ++type Conn struct { ++ ctx context.Context ++ desc string ++ offset int64 ++ ++ prot pb.CreateSocketRequest_SocketProtocol ++ local, remote *pb.AddressPort ++ ++ readDeadline, writeDeadline time.Time // optional ++} ++ ++// SetContext sets the context that is used by this Conn. ++// It is usually used only when using a Conn that was created in a different context, ++// such as when a connection is created during a warmup request but used while ++// servicing a user request. ++func (cn *Conn) SetContext(ctx context.Context) { ++ cn.ctx = ctx ++} ++ ++func (cn *Conn) Read(b []byte) (n int, err error) { ++ const maxRead = 1 << 20 ++ if len(b) > maxRead { ++ b = b[:maxRead] ++ } ++ ++ req := &pb.ReceiveRequest{ ++ SocketDescriptor: &cn.desc, ++ DataSize: proto.Int32(int32(len(b))), ++ } ++ res := &pb.ReceiveReply{} ++ if !cn.readDeadline.IsZero() { ++ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) ++ } ++ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) ++ defer cancel() ++ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { ++ return 0, err ++ } ++ if len(res.Data) == 0 { ++ return 0, io.EOF ++ } ++ if len(res.Data) > len(b) { ++ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) ++ } ++ return copy(b, res.Data), nil ++} ++ ++func (cn *Conn) Write(b []byte) (n int, err error) { ++ const lim = 1 << 20 // max per chunk ++ ++ for n < len(b) { ++ chunk := b[n:] ++ if len(chunk) > lim { ++ chunk = chunk[:lim] ++ } ++ ++ req := &pb.SendRequest{ ++ SocketDescriptor: &cn.desc, ++ Data: chunk, ++ StreamOffset: &cn.offset, ++ } ++ res := &pb.SendReply{} ++ if !cn.writeDeadline.IsZero() { ++ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) ++ } ++ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) ++ defer cancel() ++ if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { ++ // assume zero bytes were sent in this RPC ++ break ++ } ++ n += int(res.GetDataSent()) ++ cn.offset += int64(res.GetDataSent()) ++ } ++ ++ return ++} ++ ++func (cn *Conn) Close() error { ++ req := &pb.CloseRequest{ ++ SocketDescriptor: &cn.desc, ++ } ++ res := &pb.CloseReply{} ++ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { ++ return err ++ } ++ cn.desc = "CLOSED" ++ return nil ++} ++ ++func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { ++ if ap == nil { ++ return nil ++ } ++ switch prot { ++ case pb.CreateSocketRequest_TCP: ++ return &net.TCPAddr{ ++ IP: net.IP(ap.PackedAddress), ++ Port: int(*ap.Port), ++ } ++ case pb.CreateSocketRequest_UDP: ++ return 
&net.UDPAddr{ ++ IP: net.IP(ap.PackedAddress), ++ Port: int(*ap.Port), ++ } ++ } ++ panic("unknown protocol " + prot.String()) ++} ++ ++func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } ++func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } ++ ++func (cn *Conn) SetDeadline(t time.Time) error { ++ cn.readDeadline = t ++ cn.writeDeadline = t ++ return nil ++} ++ ++func (cn *Conn) SetReadDeadline(t time.Time) error { ++ cn.readDeadline = t ++ return nil ++} ++ ++func (cn *Conn) SetWriteDeadline(t time.Time) error { ++ cn.writeDeadline = t ++ return nil ++} ++ ++// KeepAlive signals that the connection is still in use. ++// It may be called to prevent the socket being closed due to inactivity. ++func (cn *Conn) KeepAlive() error { ++ req := &pb.GetSocketNameRequest{ ++ SocketDescriptor: &cn.desc, ++ } ++ res := &pb.GetSocketNameReply{} ++ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) ++} ++ ++func init() { ++ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) ++} +diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go +new file mode 100644 +index 00000000000..c804169a1c0 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/socket_vm.go +@@ -0,0 +1,64 @@ ++// Copyright 2015 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. ++ ++// +build !appengine ++ ++package socket ++ ++import ( ++ "net" ++ "time" ++ ++ "golang.org/x/net/context" ++) ++ ++// Dial connects to the address addr on the network protocol. ++// The address format is host:port, where host may be a hostname or an IP address. ++// Known protocols are "tcp" and "udp". ++// The returned connection satisfies net.Conn, and is valid while ctx is valid; ++// if the connection is to be used after ctx becomes invalid, invoke SetContext ++// with the new context. ++func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { ++ conn, err := net.Dial(protocol, addr) ++ if err != nil { ++ return nil, err ++ } ++ return &Conn{conn}, nil ++} ++ ++// DialTimeout is like Dial but takes a timeout. ++// The timeout includes name resolution, if required. ++func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { ++ conn, err := net.DialTimeout(protocol, addr, timeout) ++ if err != nil { ++ return nil, err ++ } ++ return &Conn{conn}, nil ++} ++ ++// LookupIP returns the given host's IP addresses. ++func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { ++ return net.LookupIP(host) ++} ++ ++// Conn represents a socket connection. ++// It implements net.Conn. ++type Conn struct { ++ net.Conn ++} ++ ++// SetContext sets the context that is used by this Conn. ++// It is usually used only when using a Conn that was created in a different context, ++// such as when a connection is created during a warmup request but used while ++// servicing a user request. ++func (cn *Conn) SetContext(ctx context.Context) { ++ // This function is not required in App Engine "flexible environment". ++} ++ ++// KeepAlive signals that the connection is still in use. ++// It may be called to prevent the socket being closed due to inactivity. ++func (cn *Conn) KeepAlive() error { ++ // This function is not required in App Engine "flexible environment". 
++ return nil ++} +diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +index dbe2e2d0c65..6ce01ac9a69 100644 +--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go ++++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.21.9 ++// protoc v3.21.12 + // source: google/api/field_behavior.proto + + package annotations +@@ -78,6 +78,19 @@ const ( + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 ++ // Denotes that the field in a resource (a message annotated with ++ // google.api.resource) is used in the resource name to uniquely identify the ++ // resource. For AIP-compliant APIs, this should only be applied to the ++ // `name` field on the resource. ++ // ++ // This behavior should not be applied to references to other resources within ++ // the message. ++ // ++ // The identifier field of resources often have different field behavior ++ // depending on the request it is embedded in (e.g. for Create methods name ++ // is optional and unused, while for Update methods it is required). Instead ++ // of method-specific annotations, only `IDENTIFIER` is required. ++ FieldBehavior_IDENTIFIER FieldBehavior = 8 + ) + + // Enum value maps for FieldBehavior. +@@ -91,6 +104,7 @@ var ( + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", ++ 8: "IDENTIFIER", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, +@@ -101,6 +115,7 @@ var ( + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, ++ "IDENTIFIER": 8, + } + ) + +@@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, +- 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, ++ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, +@@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, +- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, ++ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, ++ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, +diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +new file mode 100644 +index 00000000000..1d3f1b5b7ef +--- /dev/null ++++ b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +@@ -0,0 +1,23 @@ ++// Copyright 2023 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// This file, and the {{.RootMod}} import, won't actually become part of ++// the resultant binary. ++//go:build modhack ++// +build modhack ++ ++package api ++ ++// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository ++import _ "google.golang.org/genproto/internal" +diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go +new file mode 100644 +index 00000000000..90e89b4aa3f +--- /dev/null ++++ b/vendor/google.golang.org/genproto/internal/doc.go +@@ -0,0 +1,17 @@ ++// Copyright 2023 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// This file makes internal an importable go package ++// for use with backreferences from submodules. ++package internal +diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md +index 0e6ae69a584..ab0fbb79b86 100644 +--- a/vendor/google.golang.org/grpc/README.md ++++ b/vendor/google.golang.org/grpc/README.md +@@ -1,8 +1,8 @@ + # gRPC-Go + +-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) + [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] + [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) ++[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) + + The [Go][] implementation of [gRPC][]: A high performance, open source, general + RPC framework that puts mobile and HTTP/2 first. For more information see the +@@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. 
For more information see the + + ## Installation + +-With [Go module][] support (Go 1.11+), simply add the following import ++Simply add the following import to your code, and then `go [build|run|test]` ++will automatically fetch the necessary dependencies: ++ + + ```go + import "google.golang.org/grpc" + ``` + +-to your code, and then `go [build|run|test]` will automatically fetch the +-necessary dependencies. +- +-Otherwise, to install the `grpc-go` package, run the following command: +- +-```console +-$ go get -u google.golang.org/grpc +-``` +- + > **Note:** If you are trying to access `grpc-go` from **China**, see the + > [FAQ](#FAQ) below. + +@@ -56,15 +49,6 @@ To build Go code, there are several options: + + - Set up a VPN and access google.golang.org through that. + +-- Without Go module support: `git clone` the repo manually: +- +- ```sh +- git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc +- ``` +- +- You will need to do the same for all of grpc's dependencies in `golang.org`, +- e.g. `golang.org/x/net`. +- + - With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + +@@ -76,33 +60,13 @@ To build Go code, there are several options: + ``` + + Again, this will need to be done for all transitive dependencies hosted on +- golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). ++ golang.org as well. For details, refer to [golang/go issue ++ #28652](https://github.com/golang/go/issues/28652). + + ### Compiling error, undefined: grpc.SupportPackageIsVersion + +-#### If you are using Go modules: +- +-Ensure your gRPC-Go version is `require`d at the appropriate version in +-the same module containing the generated `.pb.go` files. For example, +-`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: +- +-```go +-module +- +-require ( +- google.golang.org/grpc v1.27.0 +-) +-``` +- +-#### If you are *not* using Go modules: +- +-Update the `proto` package, gRPC package, and rebuild the `.proto` files: +- +-```sh +-go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +-go get -u google.golang.org/grpc +-protoc --go_out=plugins=grpc:. *.proto +-``` ++Please update to the latest version of gRPC-Go using ++`go get google.golang.org/grpc`. + + ### How to turn on logging + +@@ -121,9 +85,11 @@ possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown +- 1. Keepalive parameters caused connection shutdown, for example if you have configured +- your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). +- If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), ++ 1. Keepalive parameters caused connection shutdown, for example if you have ++ configured your server to terminate connections regularly to [trigger DNS ++ lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). ++ If this is the case, you may want to increase your ++ [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. 
+ + It can be tricky to debug this because the error happens on the client side but +diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go +index 3efca459149..52d530d7ad0 100644 +--- a/vendor/google.golang.org/grpc/attributes/attributes.go ++++ b/vendor/google.golang.org/grpc/attributes/attributes.go +@@ -34,26 +34,26 @@ import ( + // key/value pairs. Keys must be hashable, and users should define their own + // types for keys. Values should not be modified after they are added to an + // Attributes or if they were received from one. If values implement 'Equal(o +-// interface{}) bool', it will be called by (*Attributes).Equal to determine +-// whether two values with the same key should be considered equal. ++// any) bool', it will be called by (*Attributes).Equal to determine whether ++// two values with the same key should be considered equal. + type Attributes struct { +- m map[interface{}]interface{} ++ m map[any]any + } + + // New returns a new Attributes containing the key/value pair. +-func New(key, value interface{}) *Attributes { +- return &Attributes{m: map[interface{}]interface{}{key: value}} ++func New(key, value any) *Attributes { ++ return &Attributes{m: map[any]any{key: value}} + } + + // WithValue returns a new Attributes containing the previous keys and values + // and the new key/value pair. If the same key appears multiple times, the + // last value overwrites all previous values for that key. To remove an + // existing key, use a nil value. value should not be modified later. +-func (a *Attributes) WithValue(key, value interface{}) *Attributes { ++func (a *Attributes) WithValue(key, value any) *Attributes { + if a == nil { + return New(key, value) + } +- n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} ++ n := &Attributes{m: make(map[any]any, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } +@@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { + + // Value returns the value associated with these attributes for key, or nil if + // no value is associated with key. The returned value should not be modified. +-func (a *Attributes) Value(key interface{}) interface{} { ++func (a *Attributes) Value(key any) any { + if a == nil { + return nil + } + return a.m[key] + } + +-// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +-// bool' is implemented for a value in the attributes, it is called to +-// determine if the value matches the one stored in the other attributes. If +-// Equal is not implemented, standard equality is used to determine if the two +-// values are equal. Note that some types (e.g. maps) aren't comparable by +-// default, so they must be wrapped in a struct, or in an alias type, with Equal +-// defined. ++// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is ++// implemented for a value in the attributes, it is called to determine if the ++// value matches the one stored in the other attributes. If Equal is not ++// implemented, standard equality is used to determine if the two values are ++// equal. Note that some types (e.g. maps) aren't comparable by default, so ++// they must be wrapped in a struct, or in an alias type, with Equal defined. 
+ func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true +@@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { + // o missing element of a + return false + } +- if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { ++ if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if !eq.Equal(ov) { + return false + } +@@ -112,19 +111,31 @@ func (a *Attributes) String() string { + sb.WriteString("{") + first := true + for k, v := range a.m { +- var key, val string +- if str, ok := k.(interface{ String() string }); ok { +- key = str.String() +- } +- if str, ok := v.(interface{ String() string }); ok { +- val = str.String() +- } + if !first { + sb.WriteString(", ") + } +- sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) ++ sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() + } ++ ++func str(x any) (s string) { ++ if v, ok := x.(fmt.Stringer); ok { ++ return fmt.Sprint(v) ++ } else if v, ok := x.(string); ok { ++ return v ++ } ++ return fmt.Sprintf("<%p>", x) ++} ++ ++// MarshalJSON helps implement the json.Marshaler interface, thereby rendering ++// the Attributes correctly when printing (via pretty.JSON) structs containing ++// Attributes as fields. ++// ++// Is it impossible to unmarshal attributes from a JSON representation and this ++// method is meant only for debugging purposes. ++func (a *Attributes) MarshalJSON() ([]byte, error) { ++ return []byte(a.String()), nil ++} +diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go +index 8f00523c0e2..d79560a2e26 100644 +--- a/vendor/google.golang.org/grpc/balancer/balancer.go ++++ b/vendor/google.golang.org/grpc/balancer/balancer.go +@@ -30,6 +30,7 @@ import ( + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +@@ -39,6 +40,8 @@ import ( + var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) ++ ++ logger = grpclog.Component("balancer") + ) + + // Register registers the balancer builder to the balancer map. b.Name +@@ -51,6 +54,12 @@ var ( + // an init() function), and is not thread-safe. If multiple Balancers are + // registered with the same name, the one registered last will take effect. + func Register(b Builder) { ++ if strings.ToLower(b.Name()) != b.Name() { ++ // TODO: Skip the use of strings.ToLower() to index the map after v1.59 ++ // is released to switch to case sensitive balancer registry. Also, ++ // remove this warning and update the docstrings for Register and Get. ++ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) ++ } + m[strings.ToLower(b.Name())] = b + } + +@@ -70,6 +79,12 @@ func init() { + // Note that the compare is done in a case-insensitive fashion. + // If no builder is register with the name, nil will be returned. + func Get(name string) Builder { ++ if strings.ToLower(name) != name { ++ // TODO: Skip the use of strings.ToLower() to index the map after v1.59 ++ // is released to switch to case sensitive balancer registry. Also, ++ // remove this warning and update the docstrings for Register and Get. ++ logger.Warningf("Balancer retrieved for name %q. 
grpc-go will be switching to case sensitive balancer registries soon", name) ++ } + if b, ok := m[strings.ToLower(name)]; ok { + return b + } +@@ -105,8 +120,8 @@ type SubConn interface { + // + // This will trigger a state transition for the SubConn. + // +- // Deprecated: This method is now part of the ClientConn interface and will +- // eventually be removed from here. ++ // Deprecated: this method will be removed. Create new SubConns for new ++ // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +@@ -115,6 +130,13 @@ type SubConn interface { + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) ++ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be ++ // allowed to complete. No future calls should be made on the SubConn. ++ // One final state update will be delivered to the StateListener (or ++ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to ++ // indicate the shutdown operation. This may be delivered before ++ // in-progress RPCs are complete and the actual connection is closed. ++ Shutdown() + } + + // NewSubConnOptions contains options to create new SubConn. +@@ -129,6 +151,11 @@ type NewSubConnOptions struct { + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool ++ // StateListener is called when the state of the subconn changes. If nil, ++ // Balancer.UpdateSubConnState will be called instead. Will never be ++ // invoked until after Connect() is called on the SubConn created with ++ // these options. ++ StateListener func(SubConnState) + } + + // State contains the balancer's state relevant to the gRPC ClientConn. +@@ -150,16 +177,24 @@ type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. ++ // ++ // Deprecated: please be aware that in a future version, SubConns will only ++ // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. ++ // ++ // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // +- // This will trigger a state transition for the SubConn. ++ // This may trigger a state transition for the SubConn. ++ // ++ // Deprecated: this method will be removed. Create new SubConns for new ++ // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has +@@ -250,7 +285,7 @@ type DoneInfo struct { + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. +- ServerLoad interface{} ++ ServerLoad any + } + + var ( +@@ -343,9 +378,13 @@ type Balancer interface { + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. 
++ // ++ // Deprecated: Use NewSubConnOptions.StateListener when creating the ++ // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) +- // Close closes the balancer. The balancer is not required to call +- // ClientConn.RemoveSubConn for its existing SubConns. ++ // Close closes the balancer. The balancer is not currently required to ++ // call SubConn.Shutdown for its existing SubConns; however, this will be ++ // required in a future release, so it is recommended. + Close() + } + +@@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") + type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the +- // associated SubConn), but is declared as interface{} to avoid a +- // dependency cycle. Should also return a close function that will be +- // called when all references to the Producer have been given up. +- Build(grpcClientConnInterface interface{}) (p Producer, close func()) ++ // associated SubConn), but is declared as `any` to avoid a dependency ++ // cycle. Should also return a close function that will be called when all ++ // references to the Producer have been given up. ++ Build(grpcClientConnInterface any) (p Producer, close func()) + } + + // A Producer is a type shared among potentially many consumers. It is + // associated with a SubConn, and an implementation will typically contain + // other methods to provide additional functionality, e.g. configuration or + // subscription registration. +-type Producer interface { +-} ++type Producer any +diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go +index 3929c26d31e..a7f1eeec8e6 100644 +--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go ++++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go +@@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). +- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) ++ var sc balancer.SubConn ++ opts := balancer.NewSubConnOptions{ ++ HealthCheckEnabled: b.config.HealthCheck, ++ StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, ++ } ++ sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue +@@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { +- b.cc.RemoveSubConn(sc) ++ sc.Shutdown() + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. +- // The entry will be deleted in UpdateSubConnState. ++ // The entry will be deleted in updateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn +@@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + } + ++// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
+ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) ++} ++ ++func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) +@@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: +- // When an address was removed by resolver, b called RemoveSubConn but +- // kept the sc's state in scStates. Remove state for this sc here. ++ // When an address was removed by resolver, b called Shutdown but kept ++ // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. +@@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su + } + + // Close is a nop because base balancer doesn't have internal state to clean up, +-// and it doesn't need to call RemoveSubConn for the SubConns. ++// and it doesn't need to call Shutdown for the SubConns. + func (b *baseBalancer) Close() { + } + +diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +index 04b9ad41169..a4411c22bfc 100644 +--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go ++++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +@@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { +- // If the addresses specified in the update contain addresses of type +- // "grpclb" and the selected LB policy is not "grpclb", these addresses +- // will be filtered out and ccs will be modified with the updated +- // address list. +- if ccb.curBalancerName != grpclbName { +- var addrs []resolver.Address +- for _, addr := range ccs.ResolverState.Addresses { +- if addr.Type == resolver.GRPCLB { +- continue +- } +- addrs = append(addrs, addr) +- } +- ccs.ResolverState.Addresses = addrs +- } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { +@@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat + func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { +- ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) ++ // Even though it is optional for balancers, gracefulswitch ensures ++ // opts.StateListener is set, so this cannot ever be nil. 
++ sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() + } +@@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + } + + ccb.mode = m +- done := ccb.serializer.Done ++ done := ccb.serializer.Done() + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent +@@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + } + ccb.mu.Unlock() + +- // Give enqueued callbacks a chance to finish. ++ // Give enqueued callbacks a chance to finish before closing the balancer. + <-done +- // Spawn a goroutine to close the balancer (since it may block trying to +- // cleanup all allocated resources) and return early. +- go b.Close() ++ b.Close() + } + + // exitIdleMode is invoked by grpc when the channel exits idle mode either +@@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } +- acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} ++ acbw := &acBalancerWrapper{ ++ ccb: ccb, ++ ac: ac, ++ producers: make(map[balancer.ProducerBuilder]*refCountedProducer), ++ stateListener: opts.StateListener, ++ } + ac.acbw = acbw + return acbw, nil + } + + func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +- if ccb.isIdleOrClosed() { +- // It it safe to ignore this call when the balancer is closed or in idle +- // because the ClientConn takes care of closing the connections. +- // +- // Not returning early from here when the balancer is closed or in idle +- // leads to a deadlock though, because of the following sequence of +- // calls when holding cc.mu: +- // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> +- // ccb.RemoveAddrConn --> cc.removeAddrConn +- return +- } +- +- acbw, ok := sc.(*acBalancerWrapper) +- if !ok { +- return +- } +- ccb.cc.removeAddrConn(acbw.ac, errConnDrain) ++ // The graceful switch balancer will never call this. ++ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") + } + + func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +@@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string { + // acBalancerWrapper is a wrapper on top of ac for balancers. + // It implements balancer.SubConn interface. + type acBalancerWrapper struct { +- ac *addrConn // read-only ++ ac *addrConn // read-only ++ ccb *ccBalancerWrapper // read-only ++ stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +@@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() + } + ++func (acbw *acBalancerWrapper) Shutdown() { ++ ccb := acbw.ccb ++ if ccb.isIdleOrClosed() { ++ // It it safe to ignore this call when the balancer is closed or in idle ++ // because the ClientConn takes care of closing the connections. 
++ // ++ // Not returning early from here when the balancer is closed or in idle ++ // leads to a deadlock though, because of the following sequence of ++ // calls when holding cc.mu: ++ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> ++ // ccb.RemoveAddrConn --> cc.removeAddrConn ++ return ++ } ++ ++ ccb.cc.removeAddrConn(acbw.ac, errConnDrain) ++} ++ + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not + // ready, blocks until it is or ctx expires. Returns an error when the context + // expires or the addrConn is shut down. +@@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, + + // Invoke performs a unary RPC. If the addrConn is not ready, returns + // errSubConnNotReady. +-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { ++func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err +diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +index ec2c2fa14dd..5954801122a 100644 +--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go ++++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +@@ -18,7 +18,7 @@ + + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: +-// protoc-gen-go v1.30.0 ++// protoc-gen-go v1.31.0 + // protoc v4.22.0 + // source: grpc/binlog/v1/binarylog.proto + +diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go +index e6a1dc5d75e..788c89c16f9 100644 +--- a/vendor/google.golang.org/grpc/call.go ++++ b/vendor/google.golang.org/grpc/call.go +@@ -26,12 +26,7 @@ import ( + // received. This is typically called by generated code. + // + // All errors returned by Invoke are compatible with the status package. +-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +- if err := cc.idlenessMgr.onCallBegin(); err != nil { +- return err +- } +- defer cc.idlenessMgr.onCallEnd() +- ++func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +@@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // received. This is typically called by generated code. + // + // DEPRECATED: Use ClientConn.Invoke instead. +-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) + } + + var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
+ if err != nil { + return err +diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go +index 95a7459b02f..429c389e473 100644 +--- a/vendor/google.golang.org/grpc/clientconn.go ++++ b/vendor/google.golang.org/grpc/clientconn.go +@@ -34,9 +34,12 @@ import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" ++ "google.golang.org/grpc/internal/idle" ++ "google.golang.org/grpc/internal/pretty" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" +@@ -53,8 +56,6 @@ import ( + const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +- // must match grpclbName in grpclb/grpclb.go +- grpclbName = "grpclb" + ) + + var ( +@@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires + func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, +- csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), +@@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + // Register ClientConn with channelz. + cc.channelzRegistration(target) + ++ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) ++ + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } +@@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. +- cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) ++ cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { +@@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + } + ++type idler ClientConn ++ ++func (i *idler) EnterIdleMode() error { ++ return (*ClientConn)(i).enterIdleMode() ++} ++ ++func (i *idler) ExitIdleMode() error { ++ return (*ClientConn)(i).exitIdleMode() ++} ++ + // exitIdleMode moves the channel out of idle mode by recreating the name + // resolver and load balancer. 
+ func (cc *ClientConn) exitIdleMode() error { +@@ -325,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { ++ channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() +- logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + +@@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error { + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { +- cc.blockingpicker = newPickerWrapper() ++ cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true +@@ -392,12 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { + // name resolver, load balancer and any subchannels. + func (cc *ClientConn) enterIdleMode() error { + cc.mu.Lock() ++ defer cc.mu.Unlock() ++ + if cc.conns == nil { +- cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { +- logger.Error("ClientConn asked to enter idle mode when not active") ++ channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + return nil + } + +@@ -418,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle +- cc.mu.Unlock() ++ cc.addTraceEvent("entering idle mode") + + go func() { +- cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() ++ + return nil + } + +@@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error { + func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") +- cc.csMgr.channelzID = cc.channelzID + } + + // chainUnaryClientInterceptors chains all unary client interceptors into one. +@@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { +- chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { ++ chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } +@@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final + if curr == len(interceptors)-1 { + return finalInvoker + } +- return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++ return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } + } +@@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr + } + } + ++// newConnectivityStateManager creates an connectivityStateManager with ++// the specified id. 
++func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { ++ return &connectivityStateManager{ ++ channelzID: id, ++ pubSub: grpcsync.NewPubSub(ctx), ++ } ++} ++ + // connectivityStateManager keeps the connectivity.State of ClientConn. + // This struct will eventually be exported so the balancers can access it. ++// ++// TODO: If possible, get rid of the `connectivityStateManager` type, and ++// provide this functionality using the `PubSub`, to avoid keeping track of ++// the connectivity state at two places. + type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID *channelz.Identifier ++ pubSub *grpcsync.PubSub + } + + // updateState updates the connectivity.State of ClientConn. +@@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { + return + } + csm.state = state ++ csm.pubSub.Publish(state) ++ + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. +@@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. +- Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error ++ Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) + } +@@ -622,7 +650,7 @@ type ClientConn struct { + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. +- idlenessMgr idlenessManager ++ idlenessMgr idle.Manager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. +@@ -668,6 +696,19 @@ const ( + ccIdlenessStateExitingIdle + ) + ++func (s ccIdlenessState) String() string { ++ switch s { ++ case ccIdlenessStateActive: ++ return "active" ++ case ccIdlenessStateIdle: ++ return "idle" ++ case ccIdlenessStateExitingIdle: ++ return "exitingIdle" ++ default: ++ return "unknown" ++ } ++} ++ + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or + // ctx expires. A true value is returned in former case and false in latter. + // +@@ -759,6 +800,16 @@ func init() { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) ++ ++ internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { ++ return cc.csMgr.pubSub.Subscribe(s) ++ } ++ internal.EnterIdleModeForTesting = func(cc *ClientConn) error { ++ return cc.enterIdleMode() ++ } ++ internal.ExitIdleModeForTesting = func(cc *ClientConn) error { ++ return cc.exitIdleMode() ++ } + } + + func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +@@ -867,6 +918,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi + cc.balancerWrapper.updateSubConnState(sc, s, err) + } + ++// Makes a copy of the input addresses slice and clears out the balancer ++// attributes field. 
Addresses are passed during subconn creation and address ++// update operations. In both cases, we will clear the balancer attributes by ++// calling this function, and therefore we will be able to use the Equal method ++// provided by the resolver.Address type for comparison. ++func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { ++ out := make([]resolver.Address, len(in)) ++ for i := range in { ++ out[i] = in[i] ++ out[i].BalancerAttributes = nil ++ } ++ return out ++} ++ + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. + // + // Caller needs to make sure len(addrs) > 0. +@@ -874,7 +939,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, +- addrs: addrs, ++ addrs: copyAddressesWithoutBalancerAttributes(addrs), + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), +@@ -995,8 +1060,9 @@ func equalAddresses(a, b []resolver.Address) bool { + // connections or connection attempts. + func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.mu.Lock() +- channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) ++ channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + ++ addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return +@@ -1031,8 +1097,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + +- // We have to defer here because GracefulClose => Close => onClose, which +- // requires locking ac.mu. ++ // We have to defer here because GracefulClose => onClose, which requires ++ // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil +@@ -1137,23 +1203,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel + } + + var newBalancerName string +- if cc.sc != nil && cc.sc.lbConfig != nil { ++ if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { ++ // No service config or no LB policy specified in config. ++ newBalancerName = PickFirstBalancerName ++ } else if cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name +- } else { +- var isGRPCLB bool +- for _, a := range addrs { +- if a.Type == resolver.GRPCLB { +- isGRPCLB = true +- break +- } +- } +- if isGRPCLB { +- newBalancerName = grpclbName +- } else if cc.sc != nil && cc.sc.LB != nil { +- newBalancerName = *cc.sc.LB +- } else { +- newBalancerName = PickFirstBalancerName +- } ++ } else { // cc.sc.LB != nil ++ newBalancerName = *cc.sc.LB + } + cc.balancerWrapper.switchTo(newBalancerName) + } +@@ -1192,7 +1248,10 @@ func (cc *ClientConn) ResetConnectBackoff() { + + // Close tears down the ClientConn and all underlying connections. 
+ func (cc *ClientConn) Close() error { +- defer cc.cancel() ++ defer func() { ++ cc.cancel() ++ <-cc.csMgr.pubSub.Done() ++ }() + + cc.mu.Lock() + if cc.conns == nil { +@@ -1226,7 +1285,7 @@ func (cc *ClientConn) Close() error { + rWrapper.close() + } + if idlenessMgr != nil { +- idlenessMgr.close() ++ idlenessMgr.Close() + } + + for ac := range conns { +@@ -1336,12 +1395,14 @@ func (ac *addrConn) resetTransport() { + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) +- // After exhausting all addresses, the addrConn enters +- // TRANSIENT_FAILURE. ++ ac.mu.Lock() + if acCtx.Err() != nil { ++ // addrConn was torn down. ++ ac.mu.Unlock() + return + } +- ac.mu.Lock() ++ // After exhausting all addresses, the addrConn enters ++ // TRANSIENT_FAILURE. + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. +@@ -1537,7 +1598,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { + + // Set up the health check helper functions. + currentTr := ac.transport +- newStream := func(method string) (interface{}, error) { ++ newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() +@@ -1625,16 +1686,7 @@ func (ac *addrConn) tearDown(err error) { + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} +- if err == errConnDrain && curTr != nil { +- // GracefulClose(...) may be executed multiple times when +- // i) receiving multiple GoAway frames from the server; or +- // ii) there are concurrent name resolver/Balancer triggered +- // address removal and GoAway. +- // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. +- ac.mu.Unlock() +- curTr.GracefulClose() +- ac.mu.Lock() +- } ++ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, +@@ -1648,6 +1700,29 @@ func (ac *addrConn) tearDown(err error) { + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() ++ ++ // We have to release the lock before the call to GracefulClose/Close here ++ // because both of them call onClose(), which requires locking ac.mu. ++ if curTr != nil { ++ if err == errConnDrain { ++ // Close the transport gracefully when the subConn is being shutdown. ++ // ++ // GracefulClose() may be executed multiple times if: ++ // - multiple GoAway frames are received from the server ++ // - there are concurrent name resolver or balancer triggered ++ // address removal and GoAway ++ curTr.GracefulClose() ++ } else { ++ // Hard close the transport when the channel is entering idle or is ++ // being shutdown. In the case where the channel is being shutdown, ++ // closing of transports is also taken care of by cancelation of cc.ctx. ++ // But in the case where the channel is entering idle, we need to ++ // explicitly close the transports here. Instead of distinguishing ++ // between these two cases, it is simpler to close the transport ++ // unconditionally here. ++ curTr.Close(err) ++ } ++ } + } + + func (ac *addrConn) getState() connectivity.State { +@@ -1807,19 +1882,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { + } + + // parseTarget uses RFC 3986 semantics to parse the given target into a +-// resolver.Target struct containing scheme, authority and url. Query +-// params are stripped from the endpoint. ++// resolver.Target struct containing url. 
Query params are stripped from the ++// endpoint. + func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + +- return resolver.Target{ +- Scheme: u.Scheme, +- Authority: u.Host, +- URL: *u, +- }, nil ++ return resolver.Target{URL: *u}, nil ++} ++ ++func encodeAuthority(authority string) string { ++ const upperhex = "0123456789ABCDEF" ++ ++ // Return for characters that must be escaped as per ++ // Valid chars are mentioned here: ++ // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 ++ shouldEscape := func(c byte) bool { ++ // Alphanum are always allowed. ++ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { ++ return false ++ } ++ switch c { ++ case '-', '_', '.', '~': // Unreserved characters ++ return false ++ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters ++ return false ++ case ':', '[', ']', '@': // Authority related delimeters ++ return false ++ } ++ // Everything else must be escaped. ++ return true ++ } ++ ++ hexCount := 0 ++ for i := 0; i < len(authority); i++ { ++ c := authority[i] ++ if shouldEscape(c) { ++ hexCount++ ++ } ++ } ++ ++ if hexCount == 0 { ++ return authority ++ } ++ ++ required := len(authority) + 2*hexCount ++ t := make([]byte, required) ++ ++ j := 0 ++ // This logic is a barebones version of escape in the go net/url library. ++ for i := 0; i < len(authority); i++ { ++ switch c := authority[i]; { ++ case shouldEscape(c): ++ t[j] = '%' ++ t[j+1] = upperhex[c>>4] ++ t[j+2] = upperhex[c&15] ++ j += 3 ++ default: ++ t[j] = authority[i] ++ j++ ++ } ++ } ++ return string(t) + } + + // Determine channel authority. The order of precedence is as follows: +@@ -1872,7 +1998,11 @@ func (cc *ClientConn) determineAuthority() error { + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. +- cc.authority = endpoint ++ // Escape the endpoint to handle use cases where the endpoint ++ // might not be a valid authority by default. ++ // For example an endpoint which has multiple paths like ++ // 'a/b/c', which is not a valid authority by default. ++ cc.authority = encodeAuthority(endpoint) + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go +index 12977654781..411e3dfd47c 100644 +--- a/vendor/google.golang.org/grpc/codec.go ++++ b/vendor/google.golang.org/grpc/codec.go +@@ -27,8 +27,8 @@ import ( + // omits the name/string, which vary between the two and are not needed for + // anything besides the registry in the encoding package. + type baseCodec interface { +- Marshal(v interface{}) ([]byte, error) +- Unmarshal(data []byte, v interface{}) error ++ Marshal(v any) ([]byte, error) ++ Unmarshal(data []byte, v any) error + } + + var _ baseCodec = Codec(nil) +@@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) + // Deprecated: use encoding.Codec instead. + type Codec interface { + // Marshal returns the wire format of v. +- Marshal(v interface{}) ([]byte, error) ++ Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. +- Unmarshal(data []byte, v interface{}) error ++ Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. 
+ String() string +diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go +index 15a3d5102a9..cfc9fd85e8d 100644 +--- a/vendor/google.golang.org/grpc/dialoptions.go ++++ b/vendor/google.golang.org/grpc/dialoptions.go +@@ -78,6 +78,7 @@ type dialOptions struct { + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration ++ recvBufferPool SharedBufferPool + } + + // DialOption configures how we set up the connection. +@@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} + } + ++// WithSharedWriteBuffer allows reusing per-connection transport write buffer. ++// If this option is set to true every connection will release the buffer after ++// flushing the data on the wire. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func WithSharedWriteBuffer(val bool) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.copts.SharedWriteBuffer = val ++ }) ++} ++ + // WithWriteBufferSize determines how much data can be batched before doing a + // write on the wire. The corresponding memory allocation for this buffer will + // be twice the size to keep syscalls low. The default value for this buffer is +@@ -628,6 +643,8 @@ func defaultDialOptions() dialOptions { + ReadBufferSize: defaultReadBufSize, + UseProxy: true, + }, ++ recvBufferPool: nopBufferPool{}, ++ idleTimeout: 30 * time.Minute, + } + } + +@@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { + // channel will exit idle mode when the Connect() method is called or when an + // RPC is initiated. + // +-// By default this feature is disabled, which can also be explicitly configured +-// by passing zero to this function. ++// A default timeout of 30 minutes will be used if this dial option is not set ++// at dial time and idleness can be disabled by passing a timeout of zero. + // + // # Experimental + // +@@ -676,3 +693,24 @@ func WithIdleTimeout(d time.Duration) DialOption { + o.idleTimeout = d + }) + } ++ ++// WithRecvBufferPool returns a DialOption that configures the ClientConn ++// to use the provided shared buffer pool for parsing incoming messages. Depending ++// on the application's workload, this could result in reduced memory allocation. ++// ++// If you are unsure about how to implement a memory pool but want to utilize one, ++// begin with grpc.NewSharedBufferPool. ++// ++// Note: The shared buffer pool feature will not be active if any of the following ++// options are used: WithStatsHandler, EnableTracing, or binary logging. In such ++// cases, the shared buffer pool will be ignored. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.recvBufferPool = bufferPool ++ }) ++} +diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go +index 07a5861352a..5ebf88d7147 100644 +--- a/vendor/google.golang.org/grpc/encoding/encoding.go ++++ b/vendor/google.golang.org/grpc/encoding/encoding.go +@@ -38,6 +38,10 @@ const Identity = "identity" + + // Compressor is used for compressing and decompressing when sending or + // receiving messages. 
++// ++// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, ++// gRPC will invoke it to determine the size of the buffer allocated for the ++// result of decompression. A return value of -1 indicates unknown size. + type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned +@@ -51,15 +55,6 @@ type Compressor interface { + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +- // If a Compressor implements +- // DecompressedSize(compressedBytes []byte) int, gRPC will call it +- // to determine the size of the buffer allocated for the result of decompression. +- // Return -1 to indicate unknown size. +- // +- // Experimental +- // +- // Notice: This API is EXPERIMENTAL and may be changed or removed in a +- // later release. + } + + var registeredCompressor = make(map[string]Compressor) +@@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { + // methods can be called from concurrent goroutines. + type Codec interface { + // Marshal returns the wire format of v. +- Marshal(v interface{}) ([]byte, error) ++ Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. +- Unmarshal(data []byte, v interface{}) error ++ Unmarshal(data []byte, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. +diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +index a3bb173c24a..6306e8bb0f0 100644 +--- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go ++++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +@@ -40,7 +40,7 @@ const Name = "gzip" + + func init() { + c := &compressor{} +- c.poolCompressor.New = func() interface{} { ++ c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +@@ -61,7 +61,7 @@ func SetLevel(level int) error { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) +- c.poolCompressor.New = func() interface{} { ++ c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) +diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go +index 3009b35afe7..0ee3d3bae97 100644 +--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go ++++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go +@@ -37,7 +37,7 @@ func init() { + // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
+ type codec struct{} + +-func (codec) Marshal(v interface{}) ([]byte, error) { ++func (codec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) +@@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { + return proto.Marshal(vv) + } + +-func (codec) Unmarshal(data []byte, v interface{}) error { ++func (codec) Unmarshal(data []byte, v any) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) +diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go +index 8358dd6e2ab..ac73c9ced25 100644 +--- a/vendor/google.golang.org/grpc/grpclog/component.go ++++ b/vendor/google.golang.org/grpc/grpclog/component.go +@@ -31,71 +31,71 @@ type componentData struct { + + var cache = map[string]*componentData{} + +-func (c *componentData) InfoDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) InfoDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) + } + +-func (c *componentData) WarningDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) WarningDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) + } + +-func (c *componentData) ErrorDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) ErrorDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) + } + +-func (c *componentData) FatalDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) FatalDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) + } + +-func (c *componentData) Info(args ...interface{}) { ++func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) + } + +-func (c *componentData) Warning(args ...interface{}) { ++func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) + } + +-func (c *componentData) Error(args ...interface{}) { ++func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) + } + +-func (c *componentData) Fatal(args ...interface{}) { ++func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) 
+ } + +-func (c *componentData) Infof(format string, args ...interface{}) { ++func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Warningf(format string, args ...interface{}) { ++func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Errorf(format string, args ...interface{}) { ++func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Fatalf(format string, args ...interface{}) { ++func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Infoln(args ...interface{}) { ++func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) + } + +-func (c *componentData) Warningln(args ...interface{}) { ++func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) + } + +-func (c *componentData) Errorln(args ...interface{}) { ++func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) + } + +-func (c *componentData) Fatalln(args ...interface{}) { ++func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) + } + +diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go +index c8bb2be34bf..16928c9cb99 100644 +--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go ++++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go +@@ -42,53 +42,53 @@ func V(l int) bool { + } + + // Info logs to the INFO log. +-func Info(args ...interface{}) { ++func Info(args ...any) { + grpclog.Logger.Info(args...) + } + + // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +-func Infof(format string, args ...interface{}) { ++func Infof(format string, args ...any) { + grpclog.Logger.Infof(format, args...) + } + + // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +-func Infoln(args ...interface{}) { ++func Infoln(args ...any) { + grpclog.Logger.Infoln(args...) + } + + // Warning logs to the WARNING log. +-func Warning(args ...interface{}) { ++func Warning(args ...any) { + grpclog.Logger.Warning(args...) + } + + // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +-func Warningf(format string, args ...interface{}) { ++func Warningf(format string, args ...any) { + grpclog.Logger.Warningf(format, args...) + } + + // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +-func Warningln(args ...interface{}) { ++func Warningln(args ...any) { + grpclog.Logger.Warningln(args...) + } + + // Error logs to the ERROR log. +-func Error(args ...interface{}) { ++func Error(args ...any) { + grpclog.Logger.Error(args...) + } + + // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +-func Errorf(format string, args ...interface{}) { ++func Errorf(format string, args ...any) { + grpclog.Logger.Errorf(format, args...) + } + + // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +-func Errorln(args ...interface{}) { ++func Errorln(args ...any) { + grpclog.Logger.Errorln(args...) + } + + // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. + // It calls os.Exit() with exit code 1. +-func Fatal(args ...interface{}) { ++func Fatal(args ...any) { + grpclog.Logger.Fatal(args...) 
+ // Make sure fatal logs will exit. + os.Exit(1) +@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { + + // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. + // It calls os.Exit() with exit code 1. +-func Fatalf(format string, args ...interface{}) { ++func Fatalf(format string, args ...any) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { + + // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. + // It calle os.Exit()) with exit code 1. +-func Fatalln(args ...interface{}) { ++func Fatalln(args ...any) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { + // Print prints to the logger. Arguments are handled in the manner of fmt.Print. + // + // Deprecated: use Info. +-func Print(args ...interface{}) { ++func Print(args ...any) { + grpclog.Logger.Info(args...) + } + + // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. + // + // Deprecated: use Infof. +-func Printf(format string, args ...interface{}) { ++func Printf(format string, args ...any) { + grpclog.Logger.Infof(format, args...) + } + + // Println prints to the logger. Arguments are handled in the manner of fmt.Println. + // + // Deprecated: use Infoln. +-func Println(args ...interface{}) { ++func Println(args ...any) { + grpclog.Logger.Infoln(args...) + } +diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go +index ef06a4822b7..b1674d8267c 100644 +--- a/vendor/google.golang.org/grpc/grpclog/logger.go ++++ b/vendor/google.golang.org/grpc/grpclog/logger.go +@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" + // + // Deprecated: use LoggerV2. + type Logger interface { +- Fatal(args ...interface{}) +- Fatalf(format string, args ...interface{}) +- Fatalln(args ...interface{}) +- Print(args ...interface{}) +- Printf(format string, args ...interface{}) +- Println(args ...interface{}) ++ Fatal(args ...any) ++ Fatalf(format string, args ...any) ++ Fatalln(args ...any) ++ Print(args ...any) ++ Printf(format string, args ...any) ++ Println(args ...any) + } + + // SetLogger sets the logger that is used in grpc. Call only from +@@ -45,39 +45,39 @@ type loggerWrapper struct { + Logger + } + +-func (g *loggerWrapper) Info(args ...interface{}) { ++func (g *loggerWrapper) Info(args ...any) { + g.Logger.Print(args...) + } + +-func (g *loggerWrapper) Infoln(args ...interface{}) { ++func (g *loggerWrapper) Infoln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Infof(format string, args ...interface{}) { ++func (g *loggerWrapper) Infof(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +-func (g *loggerWrapper) Warning(args ...interface{}) { ++func (g *loggerWrapper) Warning(args ...any) { + g.Logger.Print(args...) + } + +-func (g *loggerWrapper) Warningln(args ...interface{}) { ++func (g *loggerWrapper) Warningln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Warningf(format string, args ...interface{}) { ++func (g *loggerWrapper) Warningf(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +-func (g *loggerWrapper) Error(args ...interface{}) { ++func (g *loggerWrapper) Error(args ...any) { + g.Logger.Print(args...) 
+ } + +-func (g *loggerWrapper) Errorln(args ...interface{}) { ++func (g *loggerWrapper) Errorln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Errorf(format string, args ...interface{}) { ++func (g *loggerWrapper) Errorf(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +index 5de66e40d36..ecfd36d7130 100644 +--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go ++++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +@@ -33,35 +33,35 @@ import ( + // LoggerV2 does underlying logging work for grpclog. + type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +- Info(args ...interface{}) ++ Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +- Infoln(args ...interface{}) ++ Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +- Infof(format string, args ...interface{}) ++ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +- Warning(args ...interface{}) ++ Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +- Warningln(args ...interface{}) ++ Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +- Warningf(format string, args ...interface{}) ++ Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +- Error(args ...interface{}) ++ Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +- Errorln(args ...interface{}) ++ Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +- Errorf(format string, args ...interface{}) ++ Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatal(args ...interface{}) ++ Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalln(args ...interface{}) ++ Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalf(format string, args ...interface{}) ++ Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. 
+ V(l int) bool + } +@@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { + g.m[severity].Output(2, string(b)) + } + +-func (g *loggerT) Info(args ...interface{}) { ++func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Infoln(args ...interface{}) { ++func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Infof(format string, args ...interface{}) { ++func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Warning(args ...interface{}) { ++func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Warningln(args ...interface{}) { ++func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Warningf(format string, args ...interface{}) { ++func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Error(args ...interface{}) { ++func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Errorln(args ...interface{}) { ++func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Errorf(format string, args ...interface{}) { ++func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Fatal(args ...interface{}) { ++func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) + } + +-func (g *loggerT) Fatalln(args ...interface{}) { ++func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) + } + +-func (g *loggerT) Fatalf(format string, args ...interface{}) { ++func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) + } +@@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { + type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. +- InfoDepth(depth int, args ...interface{}) ++ InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +- WarningDepth(depth int, args ...interface{}) ++ WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +- ErrorDepth(depth int, args ...interface{}) ++ ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
+- FatalDepth(depth int, args ...interface{}) ++ FatalDepth(depth int, args ...any) + } +diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go +index b5bee483802..740745c45f6 100644 +--- a/vendor/google.golang.org/grpc/health/client.go ++++ b/vendor/google.golang.org/grpc/health/client.go +@@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" + + // This function implements the protocol defined at: + // https://github.com/grpc/grpc/blob/master/doc/health-checking.md +-func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { ++func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + + retryConnection: +diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +index 142d35f753e..24299efd63f 100644 +--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go ++++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +@@ -17,7 +17,7 @@ + + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: +-// protoc-gen-go v1.30.0 ++// protoc-gen-go v1.31.0 + // protoc v4.22.0 + // source: grpc/health/v1/health.proto + +diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +index a01a1b4d54b..4439cda0f3c 100644 +--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go ++++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +@@ -44,8 +44,15 @@ const ( + // + // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. + type HealthClient interface { +- // If the requested service is unknown, the call will fail with status +- // NOT_FOUND. ++ // Check gets the health of the specified service. If the requested service ++ // is unknown, the call will fail with status NOT_FOUND. If the caller does ++ // not specify a service name, the server should respond with its overall ++ // health status. ++ // ++ // Clients should set a deadline when calling Check, and can declare the ++ // server unhealthy if they do not receive a timely response. ++ // ++ // Check implementations should be idempotent and side effect free. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current +@@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + // All implementations should embed UnimplementedHealthServer + // for forward compatibility + type HealthServer interface { +- // If the requested service is unknown, the call will fail with status +- // NOT_FOUND. ++ // Check gets the health of the specified service. If the requested service ++ // is unknown, the call will fail with status NOT_FOUND. If the caller does ++ // not specify a service name, the server should respond with its overall ++ // health status. ++ // ++ // Clients should set a deadline when calling Check, and can declare the ++ // server unhealthy if they do not receive a timely response. 
++ // ++ // Check implementations should be idempotent and side effect free. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current +diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go +index bb96ef57be8..877d78fc3d0 100644 +--- a/vendor/google.golang.org/grpc/interceptor.go ++++ b/vendor/google.golang.org/grpc/interceptor.go +@@ -23,7 +23,7 @@ import ( + ) + + // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error ++type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error + + // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. + // Unary interceptors can be specified as a DialOption, using +@@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ + // defaults from the ClientConn as well as per-call options. + // + // The returned error must be compatible with the status package. +-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error ++type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + + // Streamer is called by StreamClientInterceptor to create a ClientStream. + type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) +@@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli + // server side. All per-rpc information may be mutated by the interceptor. + type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. +- Server interface{} ++ Server any + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + } +@@ -78,13 +78,13 @@ type UnaryServerInfo struct { + // status package, or be one of the context errors. Otherwise, gRPC will use + // codes.Unknown as the status code and err.Error() as the status message of the + // RPC. +-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) ++type UnaryHandler func(ctx context.Context, req any) (any, error) + + // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info + // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper + // of the service method implementation. It is the responsibility of the interceptor to invoke handler + // to complete the RPC. +-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) ++type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) + + // StreamServerInfo consists of various information about a streaming RPC on + // server side. All per-rpc information may be mutated by the interceptor. +@@ -101,4 +101,4 @@ type StreamServerInfo struct { + // info contains all the information of this RPC the interceptor can operate on. 
And handler is the + // service method implementation. It is the responsibility of the interceptor to invoke handler to + // complete the RPC. +-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error ++type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go +index 5fc0ee3da53..fed1c011a32 100644 +--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go ++++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go +@@ -23,6 +23,8 @@ + package backoff + + import ( ++ "context" ++ "errors" + "time" + + grpcbackoff "google.golang.org/grpc/backoff" +@@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { + } + return time.Duration(backoff) + } ++ ++// ErrResetBackoff is the error to be returned by the function executed by RunF, ++// to instruct the latter to reset its backoff state. ++var ErrResetBackoff = errors.New("reset backoff state") ++ ++// RunF provides a convenient way to run a function f repeatedly until the ++// context expires or f returns a non-nil error that is not ErrResetBackoff. ++// When f returns ErrResetBackoff, RunF continues to run f, but resets its ++// backoff state before doing so. backoff accepts an integer representing the ++// number of retries, and returns the amount of time to backoff. ++func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { ++ attempt := 0 ++ timer := time.NewTimer(0) ++ for ctx.Err() == nil { ++ select { ++ case <-timer.C: ++ case <-ctx.Done(): ++ timer.Stop() ++ return ++ } ++ ++ err := f() ++ if errors.Is(err, ErrResetBackoff) { ++ timer.Reset(0) ++ attempt = 0 ++ continue ++ } ++ if err != nil { ++ return ++ } ++ timer.Reset(backoff(attempt)) ++ attempt++ ++ } ++} +diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +index 08666f62a7c..3c594e6e4e5 100644 +--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go ++++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +@@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { + } + } + +-// UpdateSubConnState forwards the update to the appropriate child. +-func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++// updateSubConnState forwards the update to the appropriate child. ++func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() +@@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } +- gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. 
++ gsb.mu.Unlock() + return + } +- balToUpdate.UpdateSubConnState(sc, state) ++ if state.ConnectivityState == connectivity.Shutdown { ++ delete(balToUpdate.subconns, sc) ++ } ++ gsb.mu.Unlock() ++ if cb != nil { ++ cb(state) ++ } else { ++ balToUpdate.UpdateSubConnState(sc, state) ++ } ++} ++ ++// UpdateSubConnState forwards the update to the appropriate child. ++func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ gsb.updateSubConnState(sc, state, nil) + } + + // Close closes any active child balancers. +@@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { + // + // It implements the balancer.ClientConn interface and is passed down in that + // capacity to the wrapped balancer. It maintains a set of subConns created by +-// the wrapped balancer and calls from the latter to create/update/remove ++// the wrapped balancer and calls from the latter to create/update/shutdown + // SubConns update this set before being forwarded to the parent ClientConn. + // State updates from the wrapped balancer can result in invocation of the + // graceful switch logic. +@@ -254,21 +267,10 @@ type balancerWrapper struct { + subconns map[balancer.SubConn]bool // subconns created by this balancer + } + +-func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +- if state.ConnectivityState == connectivity.Shutdown { +- bw.gsb.mu.Lock() +- delete(bw.subconns, sc) +- bw.gsb.mu.Unlock() +- } +- // There is no need to protect this read with a mutex, as the write to the +- // Balancer field happens in SwitchTo, which completes before this can be +- // called. +- bw.Balancer.UpdateSubConnState(sc, state) +-} +- +-// Close closes the underlying LB policy and removes the subconns it created. bw +-// must not be referenced via balancerCurrent or balancerPending in gsb when +-// called. gsb.mu must not be held. Does not panic with a nil receiver. ++// Close closes the underlying LB policy and shuts down the subconns it ++// created. bw must not be referenced via balancerCurrent or balancerPending in ++// gsb when called. gsb.mu must not be held. Does not panic with a nil ++// receiver. + func (bw *balancerWrapper) Close() { + // before Close is called. 
+ if bw == nil { +@@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { +- bw.gsb.cc.RemoveSubConn(sc) ++ sc.Shutdown() + } + bw.gsb.mu.Unlock() + } +@@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne + } + bw.gsb.mu.Unlock() + ++ var sc balancer.SubConn ++ oldListener := opts.StateListener ++ opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call +- bw.gsb.cc.RemoveSubConn(sc) ++ sc.Shutdown() + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } +@@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + } + + func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { +- bw.gsb.mu.Lock() +- if !bw.gsb.balancerCurrentOrPending(bw) { +- bw.gsb.mu.Unlock() +- return +- } +- bw.gsb.mu.Unlock() +- bw.gsb.cc.RemoveSubConn(sc) ++ // Note: existing third party balancers may call this, so it must remain ++ // until RemoveSubConn is fully removed. ++ sc.Shutdown() + } + + func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go +index 3a905d96657..94a08d6875a 100644 +--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go ++++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go +@@ -25,7 +25,7 @@ import ( + // Parser converts loads from metadata into a concrete type. + type Parser interface { + // Parse parses loads from metadata. +- Parse(md metadata.MD) interface{} ++ Parse(md metadata.MD) any + } + + var parser Parser +@@ -38,7 +38,7 @@ func SetParser(lr Parser) { + } + + // Parse calls parser.Read(). +-func Parse(md metadata.MD) interface{} { ++func Parse(md metadata.MD) any { + if parser == nil { + return nil + } +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +index 6c3f632215f..0f31274a3cc 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +@@ -230,7 +230,7 @@ type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. +- Message interface{} ++ Message any + } + + func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { +@@ -270,7 +270,7 @@ type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. +- Message interface{} ++ Message any + } + + func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { +diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +index 81c2f5fd761..4399c3df495 100644 +--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go ++++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +@@ -28,25 +28,25 @@ import "sync" + // the underlying mutex used for synchronization. 
+ // + // Unbounded supports values of any type to be stored in it by using a channel +-// of `interface{}`. This means that a call to Put() incurs an extra memory +-// allocation, and also that users need a type assertion while reading. For +-// performance critical code paths, using Unbounded is strongly discouraged and +-// defining a new type specific implementation of this buffer is preferred. See ++// of `any`. This means that a call to Put() incurs an extra memory allocation, ++// and also that users need a type assertion while reading. For performance ++// critical code paths, using Unbounded is strongly discouraged and defining a ++// new type specific implementation of this buffer is preferred. See + // internal/transport/transport.go for an example of this. + type Unbounded struct { +- c chan interface{} ++ c chan any + closed bool + mu sync.Mutex +- backlog []interface{} ++ backlog []any + } + + // NewUnbounded returns a new instance of Unbounded. + func NewUnbounded() *Unbounded { +- return &Unbounded{c: make(chan interface{}, 1)} ++ return &Unbounded{c: make(chan any, 1)} + } + + // Put adds t to the unbounded buffer. +-func (b *Unbounded) Put(t interface{}) { ++func (b *Unbounded) Put(t any) { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { +@@ -89,7 +89,7 @@ func (b *Unbounded) Load() { + // + // If the unbounded buffer is closed, the read channel returned by this method + // is closed. +-func (b *Unbounded) Get() <-chan interface{} { ++func (b *Unbounded) Get() <-chan any { + return b.c + } + +diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +index 777cbcd7921..5395e77529c 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +@@ -24,9 +24,7 @@ + package channelz + + import ( +- "context" + "errors" +- "fmt" + "sort" + "sync" + "sync/atomic" +@@ -40,8 +38,11 @@ const ( + ) + + var ( +- db dbWrapper +- idGen idGenerator ++ // IDGen is the global channelz entity ID generator. It should not be used ++ // outside this package except by tests. ++ IDGen IDGenerator ++ ++ db dbWrapper + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 +@@ -52,14 +53,14 @@ var ( + func TurnOn() { + if !IsOn() { + db.set(newChannelMap()) +- idGen.reset() ++ IDGen.Reset() + atomic.StoreInt32(&curState, 1) + } + } + + // IsOn returns whether channelz data collection is on. + func IsOn() bool { +- return atomic.CompareAndSwapInt32(&curState, 1, 1) ++ return atomic.LoadInt32(&curState) == 1 + } + + // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +@@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { + return d.DB + } + +-// NewChannelzStorageForTesting initializes channelz data storage and id +-// generator for testing purposes. +-// +-// Returns a cleanup function to be invoked by the test, which waits for up to +-// 10s for all channelz state to be reset by the grpc goroutines when those +-// entities get closed. This cleanup function helps with ensuring that tests +-// don't mess up each other. 
+-func NewChannelzStorageForTesting() (cleanup func() error) { +- db.set(newChannelMap()) +- idGen.reset() +- +- return func() error { +- cm := db.get() +- if cm == nil { +- return nil +- } +- +- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +- defer cancel() +- ticker := time.NewTicker(10 * time.Millisecond) +- defer ticker.Stop() +- for { +- cm.mu.RLock() +- topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) +- cm.mu.RUnlock() +- +- if err := ctx.Err(); err != nil { +- return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) +- } +- if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { +- return nil +- } +- <-ticker.C +- } +- } +-} +- + // GetTopChannels returns a slice of top channel's ChannelMetric, along with a + // boolean indicating whether there's more top channels to be queried for. + // +@@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { + // + // If channelz is not turned ON, the channelz database is not mutated. + func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +- id := idGen.genID() ++ id := IDGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { +@@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } +@@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er + // + // If channelz is not turned ON, the channelz database is not mutated. + func RegisterServer(s Server, ref string) *Identifier { +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } +@@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } +@@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } +@@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { + return sm + } + +-type idGenerator struct { ++// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. ++type IDGenerator struct { + id int64 + } + +-func (i *idGenerator) reset() { ++// Reset resets the generated ID back to zero. Should only be used at ++// initialization or by tests sensitive to the ID number. 
++func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) + } + +-func (i *idGenerator) genID() int64 { ++func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) + } +diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go +index 8e13a3d2ce7..f89e6f77bbd 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go +@@ -31,7 +31,7 @@ func withParens(id *Identifier) string { + } + + // Info logs and adds a trace event if channelz is on. +-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, +@@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Infof logs and adds a trace event if channelz is on. +-func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, +@@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter + } + + // Warning logs and adds a trace event if channelz is on. +-func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, +@@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Warningf logs and adds a trace event if channelz is on. +-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, +@@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in + } + + // Error logs and adds a trace event if channelz is on. +-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, +@@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Errorf logs and adds a trace event if channelz is on. 
+-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, +diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go +index 7b2f350e2e6..1d4020f5379 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/types.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/types.go +@@ -628,6 +628,7 @@ type tracedChannel interface { + + type channelTrace struct { + cm *channelMap ++ clearCalled bool + createdTime time.Time + eventCount int64 + mu sync.Mutex +@@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { + } + + func (c *channelTrace) clear() { ++ if c.clearCalled { ++ return ++ } ++ c.clearCalled = true + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +index 8d194e44e1d..98288c3f866 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +@@ -23,7 +23,7 @@ import ( + ) + + // GetSocketOption gets the socket option info of the conn. +-func GetSocketOption(socket interface{}) *SocketOptionData { ++func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +index 837ddc40240..b5568b22e20 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +@@ -22,6 +22,6 @@ + package channelz + + // GetSocketOption gets the socket option info of the conn. +-func GetSocketOption(c interface{}) *SocketOptionData { ++func GetSocketOption(c any) *SocketOptionData { + return nil + } +diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go +index 32c9b59033c..9deee7f6513 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go ++++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go +@@ -25,12 +25,12 @@ import ( + type requestInfoKey struct{} + + // NewRequestInfoContext creates a context with ri. +-func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { ++func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) + } + + // RequestInfoFromContext extracts the RequestInfo from ctx. +-func RequestInfoFromContext(ctx context.Context) interface{} { ++func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) + } + +@@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { + type clientHandshakeInfoKey struct{} + + // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { ++func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) + } + + // NewClientHandshakeInfoContext creates a context with chi. 
+-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { ++func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) + } +diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +index 80fd5c7d2a4..3cf10ddfbd4 100644 +--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go ++++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +@@ -37,9 +37,15 @@ var ( + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the +- // pick_first LB policy, which can be enabled by setting the environment +- // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". +- PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ++ // pick_first LB policy. ++ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) ++ // LeastRequestLB is set if we should support the least_request_experimental ++ // LB policy, which can be enabled by setting the environment variable ++ // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". ++ LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) ++ // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS ++ // handshakes that can be performed. ++ ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + ) + + func boolFromEnv(envVar string, def bool) bool { +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +index b68e26a3649..bfc45102ab2 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +@@ -30,7 +30,7 @@ var Logger LoggerV2 + var DepthLogger DepthLoggerV2 + + // InfoDepth logs to the INFO log at the specified depth. +-func InfoDepth(depth int, args ...interface{}) { ++func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { +@@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { + } + + // WarningDepth logs to the WARNING log at the specified depth. +-func WarningDepth(depth int, args ...interface{}) { ++func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { +@@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { + } + + // ErrorDepth logs to the ERROR log at the specified depth. +-func ErrorDepth(depth int, args ...interface{}) { ++func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { +@@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { + } + + // FatalDepth logs to the FATAL log at the specified depth. +-func FatalDepth(depth int, args ...interface{}) { ++func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { +@@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { + // is defined here to avoid a circular dependency. + type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. 
+- Info(args ...interface{}) ++ Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +- Infoln(args ...interface{}) ++ Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +- Infof(format string, args ...interface{}) ++ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +- Warning(args ...interface{}) ++ Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +- Warningln(args ...interface{}) ++ Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +- Warningf(format string, args ...interface{}) ++ Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +- Error(args ...interface{}) ++ Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +- Errorln(args ...interface{}) ++ Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +- Errorf(format string, args ...interface{}) ++ Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatal(args ...interface{}) ++ Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalln(args ...interface{}) ++ Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalf(format string, args ...interface{}) ++ Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool + } +@@ -116,11 +116,11 @@ type LoggerV2 interface { + // later release. + type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. +- InfoDepth(depth int, args ...interface{}) ++ InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +- WarningDepth(depth int, args ...interface{}) ++ WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +- ErrorDepth(depth int, args ...interface{}) ++ ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. +- FatalDepth(depth int, args ...interface{}) ++ FatalDepth(depth int, args ...any) + } +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +index 02224b42ca8..faa998de763 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +@@ -31,7 +31,7 @@ type PrefixLogger struct { + } + + // Infof does info logging. 
+-func (pl *PrefixLogger) Infof(format string, args ...interface{}) { ++func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format +@@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + } + + // Warningf does warning logging. +-func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) +@@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + } + + // Errorf does error logging. +-func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) +@@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + } + + // Debugf does info logging at verbose level 2. +-func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. +diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +index d08e3e90766..aa97273e7d1 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go ++++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +@@ -80,6 +80,13 @@ func Uint32() uint32 { + return r.Uint32() + } + ++// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. ++func ExpFloat64() float64 { ++ mu.Lock() ++ defer mu.Unlock() ++ return r.ExpFloat64() ++} ++ + // Shuffle implements rand.Shuffle on the grpcrand global source. + var Shuffle = func(n int, f func(int, int)) { + mu.Lock() +diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +index 37b8d4117e7..900917dbe6c 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +@@ -32,10 +32,10 @@ import ( + // + // This type is safe for concurrent access. + type CallbackSerializer struct { +- // Done is closed once the serializer is shut down completely, i.e all ++ // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. +- Done chan struct{} ++ done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex +@@ -48,12 +48,12 @@ type CallbackSerializer struct { + // callbacks will be added once this context is canceled, and any pending un-run + // callbacks will be executed before the serializer is shut down. + func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { +- t := &CallbackSerializer{ +- Done: make(chan struct{}), ++ cs := &CallbackSerializer{ ++ done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } +- go t.run(ctx) +- return t ++ go cs.run(ctx) ++ return cs + } + + // Schedule adds a callback to be scheduled after existing callbacks are run. 
+@@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + // Return value indicates if the callback was successfully added to the list of + // callbacks to be executed by the serializer. It is not possible to add + // callbacks once the context passed to NewCallbackSerializer is cancelled. +-func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { +- t.closedMu.Lock() +- defer t.closedMu.Unlock() ++func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { ++ cs.closedMu.Lock() ++ defer cs.closedMu.Unlock() + +- if t.closed { ++ if cs.closed { + return false + } +- t.callbacks.Put(f) ++ cs.callbacks.Put(f) + return true + } + +-func (t *CallbackSerializer) run(ctx context.Context) { ++func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + +- defer close(t.Done) ++ defer close(cs.done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. +- case callback, ok := <-t.callbacks.Get(): ++ case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } +- t.callbacks.Load() ++ cs.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from +- // this method and closing t.Done. +- t.closedMu.Lock() +- t.closed = true +- backlog = t.fetchPendingCallbacks() +- t.callbacks.Close() +- t.closedMu.Unlock() ++ // this method and closing cs.done. ++ cs.closedMu.Lock() ++ cs.closed = true ++ backlog = cs.fetchPendingCallbacks() ++ cs.callbacks.Close() ++ cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } + } + +-func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { ++func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { +- case b := <-t.callbacks.Get(): ++ case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) +- t.callbacks.Load() ++ cs.callbacks.Load() + default: + return backlog + } + } + } ++ ++// Done returns a channel that is closed after the context passed to ++// NewCallbackSerializer is canceled and all callbacks have been executed. ++func (cs *CallbackSerializer) Done() <-chan struct{} { ++ return cs.done ++} +diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +new file mode 100644 +index 00000000000..aef8cec1ab0 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +@@ -0,0 +1,121 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpcsync ++ ++import ( ++ "context" ++ "sync" ++) ++ ++// Subscriber represents an entity that is subscribed to messages published on ++// a PubSub. 
It wraps the callback to be invoked by the PubSub when a new ++// message is published. ++type Subscriber interface { ++ // OnMessage is invoked when a new message is published. Implementations ++ // must not block in this method. ++ OnMessage(msg any) ++} ++ ++// PubSub is a simple one-to-many publish-subscribe system that supports ++// messages of arbitrary type. It guarantees that messages are delivered in ++// the same order in which they were published. ++// ++// Publisher invokes the Publish() method to publish new messages, while ++// subscribers interested in receiving these messages register a callback ++// via the Subscribe() method. ++// ++// Once a PubSub is stopped, no more messages can be published, but any pending ++// published messages will be delivered to the subscribers. Done may be used ++// to determine when all published messages have been delivered. ++type PubSub struct { ++ cs *CallbackSerializer ++ ++ // Access to the below fields are guarded by this mutex. ++ mu sync.Mutex ++ msg any ++ subscribers map[Subscriber]bool ++} ++ ++// NewPubSub returns a new PubSub instance. Users should cancel the ++// provided context to shutdown the PubSub. ++func NewPubSub(ctx context.Context) *PubSub { ++ return &PubSub{ ++ cs: NewCallbackSerializer(ctx), ++ subscribers: map[Subscriber]bool{}, ++ } ++} ++ ++// Subscribe registers the provided Subscriber to the PubSub. ++// ++// If the PubSub contains a previously published message, the Subscriber's ++// OnMessage() callback will be invoked asynchronously with the existing ++// message to begin with, and subsequently for every newly published message. ++// ++// The caller is responsible for invoking the returned cancel function to ++// unsubscribe itself from the PubSub. ++func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ ++ ps.subscribers[sub] = true ++ ++ if ps.msg != nil { ++ msg := ps.msg ++ ps.cs.Schedule(func(context.Context) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ if !ps.subscribers[sub] { ++ return ++ } ++ sub.OnMessage(msg) ++ }) ++ } ++ ++ return func() { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ delete(ps.subscribers, sub) ++ } ++} ++ ++// Publish publishes the provided message to the PubSub, and invokes ++// callbacks registered by subscribers asynchronously. ++func (ps *PubSub) Publish(msg any) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ ++ ps.msg = msg ++ for sub := range ps.subscribers { ++ s := sub ++ ps.cs.Schedule(func(context.Context) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ if !ps.subscribers[s] { ++ return ++ } ++ s.OnMessage(msg) ++ }) ++ } ++} ++ ++// Done returns a channel that is closed after the context passed to NewPubSub ++// is canceled and all updates have been sent to subscribers. ++func (ps *PubSub) Done() <-chan struct{} { ++ return ps.cs.Done() ++} +diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go +similarity index 61% +rename from vendor/google.golang.org/grpc/idle.go +rename to vendor/google.golang.org/grpc/internal/idle/idle.go +index dc3dc72f6b0..6c272476e5e 100644 +--- a/vendor/google.golang.org/grpc/idle.go ++++ b/vendor/google.golang.org/grpc/internal/idle/idle.go +@@ -16,7 +16,9 @@ + * + */ + +-package grpc ++// Package idle contains a component for managing idleness (entering and exiting) ++// based on RPC activity. 
++package idle + + import ( + "fmt" +@@ -24,6 +26,8 @@ import ( + "sync" + "sync/atomic" + "time" ++ ++ "google.golang.org/grpc/grpclog" + ) + + // For overriding in unit tests. +@@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) + } + +-// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter ++// Enforcer is the functionality provided by grpc.ClientConn to enter + // and exit from idle mode. +-type idlenessEnforcer interface { +- exitIdleMode() error +- enterIdleMode() error ++type Enforcer interface { ++ ExitIdleMode() error ++ EnterIdleMode() error + } + +-// idlenessManager defines the functionality required to track RPC activity on a ++// Manager defines the functionality required to track RPC activity on a + // channel. +-type idlenessManager interface { +- onCallBegin() error +- onCallEnd() +- close() ++type Manager interface { ++ OnCallBegin() error ++ OnCallEnd() ++ Close() + } + +-type noopIdlenessManager struct{} ++type noopManager struct{} + +-func (noopIdlenessManager) onCallBegin() error { return nil } +-func (noopIdlenessManager) onCallEnd() {} +-func (noopIdlenessManager) close() {} ++func (noopManager) OnCallBegin() error { return nil } ++func (noopManager) OnCallEnd() {} ++func (noopManager) Close() {} + +-// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +-// operations to synchronize access to shared state and a mutex to guarantee +-// mutual exclusion in a critical section. +-type idlenessManagerImpl struct { ++// manager implements the Manager interface. It uses atomic operations to ++// synchronize access to shared state and a mutex to guarantee mutual exclusion ++// in a critical section. ++type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. +@@ -64,14 +68,15 @@ type idlenessManagerImpl struct { + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. +- enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. +- timeout int64 // Idle timeout duration nanos stored as an int64. ++ enforcer Enforcer // Functionality provided by grpc.ClientConn. ++ timeout int64 // Idle timeout duration nanos stored as an int64. ++ logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. +- // - b: At the same time an RPC is made on the channel, and onCallBegin() ++ // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at +@@ -83,28 +88,37 @@ type idlenessManagerImpl struct { + timer *time.Timer + } + +-// newIdlenessManager creates a new idleness manager implementation for the ++// ManagerOptions is a collection of options used by ++// NewManager. ++type ManagerOptions struct { ++ Enforcer Enforcer ++ Timeout time.Duration ++ Logger grpclog.LoggerV2 ++} ++ ++// NewManager creates a new idleness manager implementation for the + // given idle timeout. 
+-func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { +- if idleTimeout == 0 { +- return noopIdlenessManager{} ++func NewManager(opts ManagerOptions) Manager { ++ if opts.Timeout == 0 { ++ return noopManager{} + } + +- i := &idlenessManagerImpl{ +- enforcer: enforcer, +- timeout: int64(idleTimeout), ++ m := &manager{ ++ enforcer: opts.Enforcer, ++ timeout: int64(opts.Timeout), ++ logger: opts.Logger, + } +- i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) +- return i ++ m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) ++ return m + } + + // resetIdleTimer resets the idle timer to the given duration. This method + // should only be called from the timer callback. +-func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) resetIdleTimer(d time.Duration) { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if i.timer == nil { ++ if m.timer == nil { + // Only close sets timer to nil. We are done. + return + } +@@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. +- i.timer.Reset(d) ++ m.timer.Reset(d) + } + + // handleIdleTimeout is the timer callback that is invoked upon expiry of the + // configured idle timeout. The channel is considered inactive if there are no + // ongoing calls and no RPC activity since the last time the timer fired. +-func (i *idlenessManagerImpl) handleIdleTimeout() { +- if i.isClosed() { ++func (m *manager) handleIdleTimeout() { ++ if m.isClosed() { + return + } + +- if atomic.LoadInt32(&i.activeCallsCount) > 0 { +- i.resetIdleTimer(time.Duration(i.timeout)) ++ if atomic.LoadInt32(&m.activeCallsCount) > 0 { ++ m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. +- if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) +- i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) ++ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the +- // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the ++ // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the + // channel is either in idle mode or is trying to get there. +- if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { ++ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. 
+- i.resetIdleTimer(time.Duration(i.timeout)) ++ m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. +- if i.tryEnterIdleMode() { ++ if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } +@@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. +- atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) +- i.resetIdleTimer(time.Duration(i.timeout)) ++ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) ++ m.resetIdleTimer(time.Duration(m.timeout)) + } + + // tryEnterIdleMode instructs the channel to enter idle mode. But before +@@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { + // Return value indicates whether or not the channel moved to idle mode. + // + // Holds idleMu which ensures mutual exclusion with exitIdleMode. +-func (i *idlenessManagerImpl) tryEnterIdleMode() bool { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) tryEnterIdleMode() bool { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { ++ if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } +- if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. +@@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. +- if err := i.enforcer.enterIdleMode(); err != nil { +- logger.Errorf("Failed to enter idle mode: %v", err) ++ if err := m.enforcer.EnterIdleMode(); err != nil { ++ m.logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. +- i.actuallyIdle = true ++ m.actuallyIdle = true + return true + } + +-// onCallBegin is invoked at the start of every RPC. +-func (i *idlenessManagerImpl) onCallBegin() error { +- if i.isClosed() { ++// OnCallBegin is invoked at the start of every RPC. ++func (m *manager) OnCallBegin() error { ++ if m.isClosed() { + return nil + } + +- if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { ++ if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. +- if err := i.exitIdleMode(); err != nil { ++ if err := m.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. 
+- atomic.AddInt32(&i.activeCallsCount, -1) ++ atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // exitIdleMode instructs the channel to exit idle mode. + // + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +-func (i *idlenessManagerImpl) exitIdleMode() error { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) exitIdleMode() error { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if !i.actuallyIdle { ++ if !m.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC +- // came in and onCallBegin() noticed that the calls count is negative. ++ // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same +- // time, all of them notice a negative calls count in onCallBegin and get ++ // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + +- if err := i.enforcer.exitIdleMode(); err != nil { ++ if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. +- atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) +- i.actuallyIdle = false ++ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) ++ m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. +- i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) ++ m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + return nil + } + +-// onCallEnd is invoked at the end of every RPC. +-func (i *idlenessManagerImpl) onCallEnd() { +- if i.isClosed() { ++// OnCallEnd is invoked at the end of every RPC. ++func (m *manager) OnCallEnd() { ++ if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. +- atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) ++ atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+- atomic.AddInt32(&i.activeCallsCount, -1) ++ atomic.AddInt32(&m.activeCallsCount, -1) + } + +-func (i *idlenessManagerImpl) isClosed() bool { +- return atomic.LoadInt32(&i.closed) == 1 ++func (m *manager) isClosed() bool { ++ return atomic.LoadInt32(&m.closed) == 1 + } + +-func (i *idlenessManagerImpl) close() { +- atomic.StoreInt32(&i.closed, 1) ++func (m *manager) Close() { ++ atomic.StoreInt32(&m.closed, 1) + +- i.idleMu.Lock() +- i.timer.Stop() +- i.timer = nil +- i.idleMu.Unlock() ++ m.idleMu.Lock() ++ m.timer.Stop() ++ m.timer = nil ++ m.idleMu.Unlock() + } +diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go +index 42ff39c8444..0d94c63e06e 100644 +--- a/vendor/google.golang.org/grpc/internal/internal.go ++++ b/vendor/google.golang.org/grpc/internal/internal.go +@@ -30,7 +30,7 @@ import ( + + var ( + // WithHealthCheckFunc is set by dialoptions.go +- WithHealthCheckFunc interface{} // func (HealthChecker) DialOption ++ WithHealthCheckFunc any // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. +@@ -38,8 +38,12 @@ var ( + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second ++ // KeepaliveMinServerPingTime is the minimum ping interval for servers. ++ // This must be 1s by default, but tests may wish to set it lower for ++ // convenience. ++ KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. +- ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult ++ ParseServiceConfig any // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the +@@ -49,33 +53,33 @@ var ( + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. +- GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder ++ GetCertificateProviderBuilder any // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. +- GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo ++ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. +- GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials ++ GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+- CanonicalString interface{} // func (codes.Code) string ++ CanonicalString any // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. +- DrainServerTransports interface{} // func(*grpc.Server, string) ++ DrainServerTransports any // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- AddGlobalServerOptions interface{} // func(opt ...ServerOption) ++ AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // +@@ -88,14 +92,14 @@ var ( + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- AddGlobalDialOptions interface{} // func(opt ...DialOption) ++ AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- DisableGlobalDialOptions interface{} // func() grpc.DialOption ++ DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // +@@ -104,23 +108,26 @@ var ( + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. +- JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption ++ JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. +- JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption ++ JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption ++ WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+- BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption ++ BinaryLogger any // func(binarylog.Logger) grpc.ServerOption ++ ++ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn ++ SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from +@@ -131,7 +138,7 @@ var ( + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. +- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) ++ NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment +@@ -163,7 +170,17 @@ var ( + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. +- ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ++ ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) ++ ++ // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra ++ // metadata to RPCs. ++ GRPCResolverSchemeExtraMetadata string = "xds" ++ ++ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. ++ EnterIdleModeForTesting any // func(*grpc.ClientConn) error ++ ++ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ++ ExitIdleModeForTesting any // func(*grpc.ClientConn) error + ) + + // HealthChecker defines the signature of the client-side LB channel health checking function. +@@ -174,7 +191,7 @@ var ( + // + // The health checking protocol is defined at: + // https://github.com/grpc/grpc/blob/master/doc/health-checking.md +-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error ++type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error + + const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. +diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +index c82e608e077..900bfb71608 100644 +--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go ++++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +@@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") + + type mdValue metadata.MD + +-func (m mdValue) Equal(o interface{}) bool { ++func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false +diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +index 0177af4b511..7033191375d 100644 +--- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go ++++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +@@ -35,7 +35,7 @@ const jsonIndent = " " + // ToJSON marshals the input into a json string. + // + // If marshal fails, it falls back to fmt.Sprintf("%+v"). 
+-func ToJSON(e interface{}) string { ++func ToJSON(e any) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} +diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +index c7a18a948ad..f0603871c93 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +@@ -92,7 +92,7 @@ type ClientStream interface { + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC +@@ -101,7 +101,7 @@ type ClientStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // ClientInterceptor is an interceptor for gRPC client streams. +diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +index 09a667f33cb..99e1e5b36c8 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +@@ -62,7 +62,8 @@ const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" +- // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. ++ // txtPrefix is the prefix string to be prepended to the host name for txt ++ // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. +@@ -86,14 +87,14 @@ var ( + minDNSResRate = 30 * time.Second + ) + +-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { +- return func(ctx context.Context, network, address string) (net.Conn, error) { ++var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { ++ return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer +- return dialer.DialContext(ctx, network, authority) ++ return dialer.DialContext(ctx, network, address) + } + } + +-var customAuthorityResolver = func(authority string) (netResolver, error) { ++var newNetResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err +@@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { + + return &net.Resolver{ + PreferGo: true, +- Dial: customAuthorityDialler(authorityWithPort), ++ Dial: addressDialer(authorityWithPort), + }, nil + } + +@@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { + + type dnsBuilder struct{} + +-// Build creates and starts a DNS resolver that watches the name resolution of the target. 
++// Build creates and starts a DNS resolver that watches the name resolution of ++// the target. + func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { +@@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts + if target.URL.Host == "" { + d.resolver = defaultResolver + } else { +- d.resolver, err = customAuthorityResolver(target.URL.Host) ++ d.resolver, err = newNetResolver(target.URL.Host) + if err != nil { + return nil, err + } +@@ -180,19 +182,22 @@ type dnsResolver struct { + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn +- // rn channel is used by ResolveNow() to force an immediate resolution of the target. ++ // rn channel is used by ResolveNow() to force an immediate resolution of the ++ // target. + rn chan struct{} +- // wg is used to enforce Close() to return after the watcher() goroutine has finished. +- // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we +- // replace the real lookup functions with mocked ones to facilitate testing. +- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes +- // will warns lookup (READ the lookup function pointers) inside watcher() goroutine +- // has data race with replaceNetFunc (WRITE the lookup function pointers). ++ // wg is used to enforce Close() to return after the watcher() goroutine has ++ // finished. Otherwise, data race will be possible. [Race Example] in ++ // dns_resolver_test we replace the real lookup functions with mocked ones to ++ // facilitate testing. If Close() doesn't wait for watcher() goroutine ++ // finishes, race detector sometimes will warns lookup (READ the lookup ++ // function pointers) inside watcher() goroutine has data race with ++ // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool + } + +-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. ++// ResolveNow invoke an immediate resolution of the target that this ++// dnsResolver watches. + func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: +@@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { + + var timer *time.Timer + if err == nil { +- // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least +- // to prevent constantly re-resolving. ++ // Success resolving, wait for the next ResolveNow. However, also wait 30 ++ // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { +@@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { + case <-d.rn: + } + } else { +- // Poll on an error found in DNS Resolver or an error received from ClientConn. ++ // Poll on an error found in DNS Resolver or an error received from ++ // ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } +@@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + } + + func handleDNSError(err error, lookupType string) error { +- if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { ++ dnsErr, ok := err.(*net.DNSError) ++ if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). +@@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + res += s + } + +- // TXT record must have "grpc_config=" attribute in order to be used as service config. ++ // TXT record must have "grpc_config=" attribute in order to be used as ++ // service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) +- // This is not an error; it is the equivalent of not having a service config. ++ // This is not an error; it is the equivalent of not having a service ++ // config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) +@@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { + return &state, nil + } + +-// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +-// If addr is an IPv4 address, return the addr and ok = true. +-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. ++// formatIP returns ok = false if addr is not a valid textual representation of ++// an IP address. If addr is an IPv4 address, return the addr and ok = true. ++// If addr is an IPv6 address, return the addr enclosed in square brackets and ++// ok = true. + func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { +@@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { + return "[" + addr + "]", true + } + +-// parseTarget takes the user input target string and default port, returns formatted host and port info. +-// If target doesn't specify a port, set the port to be the defaultPort. +-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +-// are stripped when setting the host. ++// parseTarget takes the user input target string and default port, returns ++// formatted host and port info. If target doesn't specify a port, set the port ++// to be the defaultPort. If target is in IPv6 format and host-name is enclosed ++// in square brackets, brackets are stripped when setting the host. + // examples: + // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" + // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +@@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { +- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. ++ // If the port field is empty (target ends with colon), e.g. "[::1]:", ++ // this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { +- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
++ // Keep consistent with net.Dial(): If the host is empty, as in ":80", ++ // the local system is assumed. + host = "localhost" + } + return host, port, nil +diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go +index b0ead4f54f8..03ef2fedd5c 100644 +--- a/vendor/google.golang.org/grpc/internal/status/status.go ++++ b/vendor/google.golang.org/grpc/internal/status/status.go +@@ -43,13 +43,41 @@ type Status struct { + s *spb.Status + } + ++// NewWithProto returns a new status including details from statusProto. This ++// is meant to be used by the gRPC library only. ++func NewWithProto(code codes.Code, message string, statusProto []string) *Status { ++ if len(statusProto) != 1 { ++ // No grpc-status-details bin header, or multiple; just ignore. ++ return &Status{s: &spb.Status{Code: int32(code), Message: message}} ++ } ++ st := &spb.Status{} ++ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { ++ // Probably not a google.rpc.Status proto; do not provide details. ++ return &Status{s: &spb.Status{Code: int32(code), Message: message}} ++ } ++ if st.Code == int32(code) { ++ // The codes match between the grpc-status header and the ++ // grpc-status-details-bin header; use the full details proto. ++ return &Status{s: st} ++ } ++ return &Status{ ++ s: &spb.Status{ ++ Code: int32(codes.Internal), ++ Message: fmt.Sprintf( ++ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", ++ code, message, st, ++ ), ++ }, ++ } ++} ++ + // New returns a Status representing c and msg. + func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + } + + // Newf returns New(c, fmt.Sprintf(format, a...)). +-func Newf(c codes.Code, format string, a ...interface{}) *Status { ++func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) + } + +@@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { + } + + // Errorf returns Error(c, fmt.Sprintf(format, a...)). +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) + } + +@@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + + // Details returns a slice of details messages attached to the status. + // If a detail cannot be decoded, the error is returned in place of the detail. 
+-func (s *Status) Details() []interface{} { ++func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } +- details := make([]interface{}, 0, len(s.s.Details)) ++ details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { +diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +index be5a9c81eb9..b330ccedc8a 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go ++++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +@@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + } + + type itemNode struct { +- it interface{} ++ it any + next *itemNode + } + +@@ -49,7 +49,7 @@ type itemList struct { + tail *itemNode + } + +-func (il *itemList) enqueue(i interface{}) { ++func (il *itemList) enqueue(i any) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n +@@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { + + // peek returns the first item in the list without removing it from the + // list. +-func (il *itemList) peek() interface{} { ++func (il *itemList) peek() any { + return il.head.it + } + +-func (il *itemList) dequeue() interface{} { ++func (il *itemList) dequeue() any { + if il.head == nil { + return nil + } +@@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { + return err + } + +-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { ++func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { +@@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b + } + + // Note argument f should never be nil. +-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { ++func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() +@@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo + return true, nil + } + +-func (c *controlBuffer) get(block bool) (interface{}, error) { ++func (c *controlBuffer) get(block bool) (any, error) { + for { + c.mu.Lock() + if c.err != nil { +@@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { + return nil + } + +-func (l *loopyWriter) handle(i interface{}) error { ++func (l *loopyWriter) handle(i any) error { + switch i := i.(type) { + case *incomingWindowUpdate: + l.incomingWindowUpdateHandler(i) +diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +index 98f80e3fa00..17f7a21b5a9 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +@@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + ++ s.hdrMu.Lock() + if p := st.Proto(); p != nil && len(p.Details) > 0 { ++ delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ panic(err) + } + +- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) ++ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + } + +- if md := s.Trailer(); len(md) > 0 { +- for k, vv := range md { ++ if len(s.trailer) > 0 { ++ for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue +@@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro + } + } + } ++ s.hdrMu.Unlock() + }) + + if err == nil { // transport has not been closed +@@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + } + + // writeCustomHeaders sets custom headers set on the stream via SetHeader +-// on the first write call (Write, WriteHeader, or WriteStatus). ++// on the first write call (Write, WriteHeader, or WriteStatus) + func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + +@@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return err + } + +-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +index 326bf084800..d6f5c49358b 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +@@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), +- framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), ++ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), +@@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + firstTry := true + var ch chan struct{} + transportDrainRequired := false +- checkForStreamQuota := func(it interface{}) bool { ++ checkForStreamQuota := func(it any) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ +@@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + return true + } + var hdrListSizeErr error +- checkForHeaderListSize := func(it interface{}) bool { ++ checkForHeaderListSize := func(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } +@@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + return true + } + for { +- success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { ++ success, err := t.controlBuf.executeAndPut(func(it any) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { +@@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
+ rst: rst, + rstCode: rstCode, + } +- addBackStreamQuota := func(interface{}) bool { ++ addBackStreamQuota := func(any) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { +@@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { + // for the transport and the stream based on the current bdp + // estimation. + func (t *http2Client) updateFlowControl(n uint32) { +- updateIWS := func(interface{}) bool { ++ updateIWS := func(any) bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { +@@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } +- t.controlBuf.executeAndPut(func(interface{}) bool { ++ t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } +@@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string +- statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string +@@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) +- case "grpc-status-details-bin": +- var err error +- statusGen, err = decodeGRPCStatusDetails(hf.Value) +- if err != nil { +- headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) +- } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" +@@ -1505,14 +1498,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + return + } + +- isHeader := false +- +- // If headerChan hasn't been closed yet +- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { +- s.headerValid = true +- if !endStream { +- // HEADERS frame block carries a Response-Headers. +- isHeader = true ++ // For headers, set them in s.header and close headerChan. For trailers or ++ // trailers-only, closeStream will set the trailers and close headerChan as ++ // needed. ++ if !endStream { ++ // If headerChan hasn't been closed yet (expected, given we checked it ++ // above, but something else could have potentially closed the whole ++ // stream). ++ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { ++ s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. +@@ -1520,15 +1514,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + if len(mdata) > 0 { + s.header = mdata + } +- } else { +- // HEADERS frame block carries a Trailers-Only. 
+- s.noHeaders = true ++ close(s.headerChan) + } +- close(s.headerChan) + } + + for _, sh := range t.statsHandlers { +- if isHeader { ++ if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), +@@ -1550,13 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + return + } + +- if statusGen == nil { +- statusGen = status.New(rawStatusCode, grpcMessage) +- } ++ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + +- // if client received END_STREAM from server while stream was still active, send RST_STREAM +- rst := s.getState() == streamActive +- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) ++ // If client received END_STREAM from server while stream was still active, ++ // send RST_STREAM. ++ rstStream := s.getState() == streamActive ++ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) + } + + // readServerPreface reads and handles the initial settings frame from the +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +index ec4eef21342..6fa1eb41992 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +@@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } +- framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) ++ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, +@@ -233,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { +- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { ++ if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } +@@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + + // operateHeaders takes action on the decoded headers. Returns an error if fatal + // error encountered and transport needs to close, otherwise returns nil. 
+-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { ++func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() +@@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + } + if t.inTapHandle != nil { + var err error +- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { ++ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) +@@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } +- s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ +@@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + // HandleStreams receives incoming streams using the given handler. This is + // typically run in a separate goroutine. + // traceCtx attaches trace to ctx and returns the new context. +-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (t *http2Server) HandleStreams(handle func(*Stream)) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() +@@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: +- if err := t.operateHeaders(frame, handle, traceCtx); err != nil { ++ if err := t.operateHeaders(frame, handle); err != nil { + t.Close(err) + break + } +@@ -850,7 +849,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + } + return nil + }) +- t.controlBuf.executeAndPut(func(interface{}) bool { ++ t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } +@@ -934,7 +933,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) + return headerFields + } + +-func (t *http2Server) checkForHeaderListSize(it interface{}) bool { ++func (t *http2Server) checkForHeaderListSize(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } +@@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { ++ // Do not use the user's grpc-status-details-bin (if present) if we are ++ // even attempting to set our own. ++ delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + } else { +- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) ++ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) + } + } + +diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go +index 19cbb18f5ab..dc29d590e91 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go +@@ -30,15 +30,13 @@ import ( + "net/url" + "strconv" + "strings" ++ "sync" + "time" + "unicode/utf8" + +- "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +- spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +- "google.golang.org/grpc/status" + ) + + const ( +@@ -87,6 +85,8 @@ var ( + } + ) + ++var grpcStatusDetailsBinHeader = "grpc-status-details-bin" ++ + // isReservedHeader checks whether hdr belongs to HTTP2 headers + // reserved by gRPC protocol. Any other headers are classified as the + // user-specified metadata. +@@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { + "grpc-message", + "grpc-status", + "grpc-timeout", +- "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. +@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { + return v, nil + } + +-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { +- v, err := decodeBinHeader(rawDetails) +- if err != nil { +- return nil, err +- } +- st := &spb.Status{} +- if err = proto.Unmarshal(v, st); err != nil { +- return nil, err +- } +- return status.FromProto(st), nil +-} +- + type timeoutUnit uint8 + + const ( +@@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { + } + + type bufWriter struct { ++ pool *sync.Pool + buf []byte + offset int + batchSize int +@@ -316,12 +304,17 @@ type bufWriter struct { + err error + } + +-func newBufWriter(conn net.Conn, batchSize int) *bufWriter { +- return &bufWriter{ +- buf: make([]byte, batchSize*2), ++func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { ++ w := &bufWriter{ + batchSize: batchSize, + conn: conn, ++ pool: pool, ++ } ++ // this indicates that we should use non shared buf ++ if pool == nil { ++ w.buf = make([]byte, batchSize) + } ++ return w + } + + func (w *bufWriter) Write(b []byte) (n int, err error) { +@@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { + n, err = w.conn.Write(b) + return n, toIOError(err) + } ++ if w.buf == nil { ++ b := w.pool.Get().(*[]byte) ++ w.buf = *b ++ } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { +- err = w.Flush() ++ err = w.flushKeepBuffer() + } + } + return n, err + } + + func (w *bufWriter) Flush() error { ++ err := w.flushKeepBuffer() ++ // Only release the buffer if we are in a "shared" mode ++ if w.buf != nil && w.pool != nil { ++ b := w.buf ++ w.pool.Put(&b) ++ w.buf = nil ++ } ++ return err ++} ++ ++func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } +@@ -381,7 +389,10 @@ type framer struct { + fr *http2.Framer + } + +-func 
newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { ++var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) ++var writeBufferMutex sync.Mutex ++ ++func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } +@@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } +- w := newBufWriter(conn, writeBufferSize) ++ var pool *sync.Pool ++ if sharedWriteBuffer { ++ pool = getWriteBufferPool(writeBufferSize) ++ } ++ w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), +@@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList + return f + } + ++func getWriteBufferPool(writeBufferSize int) *sync.Pool { ++ writeBufferMutex.Lock() ++ defer writeBufferMutex.Unlock() ++ size := writeBufferSize * 2 ++ pool, ok := writeBufferPoolMap[size] ++ if ok { ++ return pool ++ } ++ pool = &sync.Pool{ ++ New: func() any { ++ b := make([]byte, size) ++ return &b ++ }, ++ } ++ writeBufferPoolMap[size] = pool ++ return pool ++} ++ + // parseDialTarget returns the network and address to pass to dialer. + func parseDialTarget(target string) (string, string) { + net := "tcp" +diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go +index aa1c896595d..aac056e723b 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/transport.go ++++ b/vendor/google.golang.org/grpc/internal/transport/transport.go +@@ -43,10 +43,6 @@ import ( + "google.golang.org/grpc/tap" + ) + +-// ErrNoHeaders is used as a signal that a trailers only response was received, +-// and is not a real error. +-var ErrNoHeaders = errors.New("stream has no headers") +- + const logLevel = 2 + + type bufferPool struct { +@@ -56,7 +52,7 @@ type bufferPool struct { + func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ +- New: func() interface{} { ++ New: func() any { + return new(bytes.Buffer) + }, + }, +@@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { + } + s.waitOnHeader() + +- if !s.headerValid { ++ if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + +- if s.noHeaders { +- return nil, ErrNoHeaders +- } +- + return s.header.Copy(), nil + } + +@@ -559,6 +551,7 @@ type ServerConfig struct { + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int ++ SharedWriteBuffer bool + ChannelzParentID *channelz.Identifier + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +@@ -592,6 +585,8 @@ type ConnectOptions struct { + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int ++ // SharedWriteBuffer indicates whether connections should reuse write buffer ++ SharedWriteBuffer bool + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID *channelz.Identifier + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. +@@ -703,7 +698,7 @@ type ClientTransport interface { + // Write methods for a given Stream will be called serially. 
+ type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. +- HandleStreams(func(*Stream), func(context.Context, string) context.Context) ++ HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. +@@ -736,7 +731,7 @@ type ServerTransport interface { + } + + // connectionErrorf creates an ConnectionError with the specified error description. +-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { ++func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, +diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go +index 02f97595124..236837f4157 100644 +--- a/vendor/google.golang.org/grpc/picker_wrapper.go ++++ b/vendor/google.golang.org/grpc/picker_wrapper.go +@@ -28,21 +28,26 @@ import ( + "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" ++ "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + ) + + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick + // actions and unblock when there's a picker update. + type pickerWrapper struct { +- mu sync.Mutex +- done bool +- idle bool +- blockingCh chan struct{} +- picker balancer.Picker ++ mu sync.Mutex ++ done bool ++ idle bool ++ blockingCh chan struct{} ++ picker balancer.Picker ++ statsHandlers []stats.Handler // to record blocking picker calls + } + +-func newPickerWrapper() *pickerWrapper { +- return &pickerWrapper{blockingCh: make(chan struct{})} ++func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { ++ return &pickerWrapper{ ++ blockingCh: make(chan struct{}), ++ statsHandlers: statsHandlers, ++ } + } + + // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +@@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + var ch chan struct{} + + var lastPickErr error ++ + for { + pw.mu.Lock() + if pw.done { +@@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + continue + } + ++ // If the channel is set, it means that the pick call had to wait for a ++ // new picker at some point. Either it's the first iteration and this ++ // function received the first picker, or a picker errored with ++ // ErrNoSubConnAvailable or errored with failfast set to false, which ++ // will trigger a continue to the next iteration. In the first case this ++ // conditional will hit if this call had to block (the channel is set). ++ // In the second case, the only way it will get to this conditional is ++ // if there is a new picker. 
++ if ch != nil { ++ for _, sh := range pw.statsHandlers { ++ sh.HandleRPC(ctx, &stats.PickerUpdated{}) ++ } ++ } ++ + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() +diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go +index abe266b021d..2e9cf66b4af 100644 +--- a/vendor/google.golang.org/grpc/pickfirst.go ++++ b/vendor/google.golang.org/grpc/pickfirst.go +@@ -26,12 +26,18 @@ import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" ++ internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" ++ "google.golang.org/grpc/internal/pretty" ++ "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + ) + +-// PickFirstBalancerName is the name of the pick_first balancer. +-const PickFirstBalancerName = "pick_first" ++const ( ++ // PickFirstBalancerName is the name of the pick_first balancer. ++ PickFirstBalancerName = "pick_first" ++ logPrefix = "[pick-first-lb %p] " ++) + + func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +@@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { + type pickfirstBuilder struct{} + + func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +- return &pickfirstBalancer{cc: cc} ++ b := &pickfirstBalancer{cc: cc} ++ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) ++ return b + } + + func (*pickfirstBuilder) Name() string { +@@ -57,23 +65,36 @@ type pfConfig struct { + } + + func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +- cfg := &pfConfig{} +- if err := json.Unmarshal(js, cfg); err != nil { ++ if !envconfig.PickFirstLBConfig { ++ // Prior to supporting loadbalancing configuration, the pick_first LB ++ // policy did not implement the balancer.ConfigParser interface. This ++ // meant that if a non-empty configuration was passed to it, the service ++ // config unmarshaling code would throw a warning log, but would ++ // continue using the pick_first LB policy. The code below ensures the ++ // same behavior is retained if the env var is not set. ++ if string(js) != "{}" { ++ logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) ++ } ++ return nil, nil ++ } ++ ++ var cfg pfConfig ++ if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil + } + + type pickfirstBalancer struct { ++ logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +- cfg *pfConfig + } + + func (b *pickfirstBalancer) ResolverError(err error) { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) ++ if b.logger.V(2) { ++ b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure +@@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { +- // Remove the old subConn. All addresses were removed, so it is no longer +- // valid. +- b.cc.RemoveSubConn(b.subConn) ++ // Shut down the old subConn. 
All addresses were removed, so it is ++ // no longer valid. ++ b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + +- if state.BalancerConfig != nil { +- cfg, ok := state.BalancerConfig.(*pfConfig) +- if !ok { +- return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) +- } +- b.cfg = cfg ++ // We don't have to guard this block with the env var because ParseConfig ++ // already does so. ++ cfg, ok := state.BalancerConfig.(pfConfig) ++ if state.BalancerConfig != nil && !ok { ++ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } +- +- if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { ++ if cfg.ShuffleAddressList { ++ addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } ++ ++ if b.logger.V(2) { ++ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) ++ } ++ + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + +- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) ++ var subConn balancer.SubConn ++ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ ++ StateListener: func(state balancer.SubConnState) { ++ b.updateSubConnState(subConn, state) ++ }, ++ }) + if err != nil { +- if logger.V(2) { +- logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) ++ if b.logger.V(2) { ++ b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ +@@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState + return nil + } + ++// UpdateSubConnState is unused as a StateListener is always registered when ++// creating SubConns. + func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) ++ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) ++} ++ ++func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { ++ if b.logger.V(2) { ++ b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") ++ if b.logger.V(2) { ++ b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } +diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go +index cd45547854f..73bd6336433 100644 +--- a/vendor/google.golang.org/grpc/preloader.go ++++ b/vendor/google.golang.org/grpc/preloader.go +@@ -37,7 +37,7 @@ type PreparedMsg struct { + } + + // Encode marshalls and compresses the message using the codec and compressor for the stream. 
+-func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { ++func (p *PreparedMsg) Encode(s Stream, msg any) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { +diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go +index f27978e1281..0a4262342f3 100644 +--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go ++++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go +@@ -26,13 +26,16 @@ import ( + "google.golang.org/grpc/resolver" + ) + +-// NewBuilderWithScheme creates a new test resolver builder with the given scheme. ++// NewBuilderWithScheme creates a new manual resolver builder with the given ++// scheme. Every instance of the manual resolver may only ever be used with a ++// single grpc.ClientConn. Otherwise, bad things will happen. + func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ +- BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, +- ResolveNowCallback: func(resolver.ResolveNowOptions) {}, +- CloseCallback: func() {}, +- scheme: scheme, ++ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ++ UpdateStateCallback: func(error) {}, ++ ResolveNowCallback: func(resolver.ResolveNowOptions) {}, ++ CloseCallback: func() {}, ++ scheme: scheme, + } + } + +@@ -42,6 +45,11 @@ type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) ++ // UpdateStateCallback is called when the UpdateState method is called on ++ // the resolver. The value passed as argument to this callback is the value ++ // returned by the resolver.ClientConn. Must not be nil. Must not be ++ // changed after the resolver may be built. ++ UpdateStateCallback func(err error) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. +@@ -52,30 +60,34 @@ type Resolver struct { + scheme string + + // Fields actually belong to the resolver. +- mu sync.Mutex // Guards access to CC. +- CC resolver.ClientConn +- bootstrapState *resolver.State ++ // Guards access to below fields. ++ mu sync.Mutex ++ CC resolver.ClientConn ++ // Storing the most recent state update makes this resolver resilient to ++ // restarts, which is possible with channel idleness. ++ lastSeenState *resolver.State + } + + // InitialState adds initial state to the resolver so that UpdateState doesn't + // need to be explicitly called after Dial. + func (r *Resolver) InitialState(s resolver.State) { +- r.bootstrapState = &s ++ r.lastSeenState = &s + } + + // Build returns itself for Resolver, because it's both a builder and a resolver. + func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { ++ r.BuildCallback(target, cc, opts) + r.mu.Lock() + r.CC = cc +- r.mu.Unlock() +- r.BuildCallback(target, cc, opts) +- if r.bootstrapState != nil { +- r.UpdateState(*r.bootstrapState) ++ if r.lastSeenState != nil { ++ err := r.CC.UpdateState(*r.lastSeenState) ++ go r.UpdateStateCallback(err) + } ++ r.mu.Unlock() + return r, nil + } + +-// Scheme returns the test scheme. ++// Scheme returns the manual resolver's scheme. 
+ func (r *Resolver) Scheme() string { + return r.scheme + } +@@ -93,8 +105,10 @@ func (r *Resolver) Close() { + // UpdateState calls CC.UpdateState. + func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() +- r.CC.UpdateState(s) ++ err := r.CC.UpdateState(s) ++ r.lastSeenState = &s + r.mu.Unlock() ++ r.UpdateStateCallback(err) + } + + // ReportError calls CC.ReportError. +diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go +index efcb7f3efd8..804be887de0 100644 +--- a/vendor/google.golang.org/grpc/resolver/map.go ++++ b/vendor/google.golang.org/grpc/resolver/map.go +@@ -20,7 +20,7 @@ package resolver + + type addressMapEntry struct { + addr Address +- value interface{} ++ value any + } + + // AddressMap is a map of addresses to arbitrary values taking into account +@@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { + } + + // Get returns the value for the address in the map, if present. +-func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { ++func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { +@@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + } + + // Set updates or adds the value to the address in the map. +-func (a *AddressMap) Set(addr Address, value interface{}) { ++func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { +@@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { + } + + // Values returns a slice of all current map values. +-func (a *AddressMap) Values() []interface{} { +- ret := make([]interface{}, 0, a.Len()) ++func (a *AddressMap) Values() []any { ++ ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) +diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go +index 353c10b69a5..11384e228e5 100644 +--- a/vendor/google.golang.org/grpc/resolver/resolver.go ++++ b/vendor/google.golang.org/grpc/resolver/resolver.go +@@ -77,25 +77,6 @@ func GetDefaultScheme() string { + return defaultScheme + } + +-// AddressType indicates the address type returned by name resolution. +-// +-// Deprecated: use Attributes in Address instead. +-type AddressType uint8 +- +-const ( +- // Backend indicates the address is for a backend server. +- // +- // Deprecated: use Attributes in Address instead. +- Backend AddressType = iota +- // GRPCLB indicates the address is for a grpclb load balancer. +- // +- // Deprecated: to select the GRPCLB load balancing policy, use a service +- // config with a corresponding loadBalancingConfig. To supply balancer +- // addresses to the GRPCLB load balancing policy, set State.Attributes +- // using balancer/grpclb/state.Set. +- GRPCLB +-) +- + // Address represents a server the client connects to. + // + // # Experimental +@@ -111,9 +92,6 @@ type Address struct { + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // +- // If Type is GRPCLB, ServerName should be the name of the remote load +- // balancer, not the name of the backend. +- // + // WARNING: ServerName must only be populated with trusted values. 
It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. +@@ -126,27 +104,29 @@ type Address struct { + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. +- BalancerAttributes *attributes.Attributes +- +- // Type is the type of this address. + // +- // Deprecated: use Attributes instead. +- Type AddressType ++ // Deprecated: when an Address is inside an Endpoint, this field should not ++ // be used, and it will eventually be removed entirely. ++ BalancerAttributes *attributes.Attributes + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. +- Metadata interface{} ++ Metadata any + } + + // Equal returns whether a and o are identical. Metadata is compared directly, + // not with any recursive introspection. ++// ++// This method compares all fields of the address. When used to tell apart ++// addresses during subchannel creation or connection establishment, it might be ++// more appropriate for the caller to implement custom equality logic. + func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && +- a.Type == o.Type && a.Metadata == o.Metadata ++ a.Metadata == o.Metadata + } + + // String returns JSON formatted string representation of the address. +@@ -190,11 +170,37 @@ type BuildOptions struct { + Dialer func(context.Context, string) (net.Conn, error) + } + ++// An Endpoint is one network endpoint, or server, which may have multiple ++// addresses with which it can be accessed. ++type Endpoint struct { ++ // Addresses contains a list of addresses used to access this endpoint. ++ Addresses []Address ++ ++ // Attributes contains arbitrary data about this endpoint intended for ++ // consumption by the LB policy. ++ Attributes *attributes.Attributes ++} ++ + // State contains the current Resolver state relevant to the ClientConn. + type State struct { + // Addresses is the latest set of resolved addresses for the target. ++ // ++ // If a resolver sets Addresses but does not set Endpoints, one Endpoint ++ // will be created for each Address before the State is passed to the LB ++ // policy. The BalancerAttributes of each entry in Addresses will be set ++ // in Endpoints.Attributes, and be cleared in the Endpoint's Address's ++ // BalancerAttributes. ++ // ++ // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + ++ // Endpoints is the latest set of resolved endpoints for the target. ++ // ++ // If a resolver produces a State containing Endpoints but not Addresses, ++ // it must take care to ensure the LB policies it selects will support ++ // Endpoints. ++ Endpoints []Endpoint ++ + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. +@@ -254,20 +260,7 @@ type ClientConn interface { + // target does not contain a scheme or if the parsed scheme is not registered + // (i.e. no corresponding resolver available to resolve the endpoint), we will + // apply the default scheme, and will attempt to reparse it. 
+-// +-// Examples: +-// +-// - "dns://some_authority/foo.bar" +-// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +-// - "foo.bar" +-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +-// - "unknown_scheme://authority/endpoint" +-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} + type Target struct { +- // Deprecated: use URL.Scheme instead. +- Scheme string +- // Deprecated: use URL.Host instead. +- Authority string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial +@@ -321,10 +314,3 @@ type Resolver interface { + // Close closes the resolver. + Close() + } +- +-// UnregisterForTesting removes the resolver builder with the given scheme from the +-// resolver map. +-// This function is for testing only. +-func UnregisterForTesting(scheme string) { +- delete(m, scheme) +-} +diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +index b408b3688f2..d6833056084 100644 +--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go ++++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +@@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. +- <-ccr.serializer.Done ++ <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. +@@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) + // which includes addresses and service config. + func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + errCh := make(chan error, 1) ++ if s.Endpoints == nil { ++ s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) ++ for _, a := range s.Addresses { ++ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} ++ ep.Addresses[0].BalancerAttributes = nil ++ s.Endpoints = append(s.Endpoints, ep) ++ } ++ } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s +diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go +index 2030736a306..b7723aa09cb 100644 +--- a/vendor/google.golang.org/grpc/rpc_util.go ++++ b/vendor/google.golang.org/grpc/rpc_util.go +@@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + } + return &gzipCompressor{ + pool: sync.Pool{ +- New: func() interface{} { ++ New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) +@@ -577,6 +577,9 @@ type parser struct { + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte ++ ++ // recvBufferPool is the pool of shared receive buffers. ++ recvBufferPool SharedBufferPool + } + + // recvMsg reads a complete gRPC message from the stream. +@@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } +- // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead +- // of making it for each message: +- msg = make([]byte, int(length)) ++ msg = p.recvBufferPool.Get(int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF +@@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt + // encode serializes msg and returns a buffer containing the message, or an + // error if it is too large to be transmitted by grpc. If msg is nil, it + // generates an empty message. +-func encode(c baseCodec, msg interface{}) ([]byte, error) { ++func encode(c baseCodec, msg any) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } +@@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + return hdr, data + } + +-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { ++func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, +@@ -726,12 +727,12 @@ type payloadInfo struct { + } + + func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { +- pf, d, err := p.recvMsg(maxReceiveMessageSize) ++ pf, buf, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { +- payInfo.compressedLength = len(d) ++ payInfo.compressedLength = len(buf) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { +@@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { +- d, err = dc.Do(bytes.NewReader(d)) +- size = len(d) ++ buf, err = dc.Do(bytes.NewReader(buf)) ++ size = len(buf) + } else { +- d, size, err = decompress(compressor, d, maxReceiveMessageSize) ++ buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) +@@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } +- return d, nil ++ return buf, nil + } + + // Using compressor, decompress d, returning data and size. +@@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize + // For the two compressor parameters, both should not be set, but if they are, + // dc takes precedence over compressor. + // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
+-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { +- d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) ++func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { ++ buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } +- if err := c.Unmarshal(d, m); err != nil { ++ if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + if payInfo != nil { +- payInfo.uncompressedBytes = d ++ payInfo.uncompressedBytes = buf ++ } else { ++ p.recvBufferPool.Put(&buf) + } + return nil + } +@@ -860,19 +863,22 @@ func ErrorDesc(err error) string { + // Errorf returns nil if c is OK. + // + // Deprecated: use status.Errorf instead. +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) + } + ++var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) ++var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) ++ + // toRPCErr converts an error into an error from the status package. + func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: +- return status.Error(codes.DeadlineExceeded, err.Error()) ++ return errContextDeadline + case context.Canceled: +- return status.Error(codes.Canceled, err.Error()) ++ return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } +diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go +index 8869cc906f2..8f60d421437 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -86,7 +86,7 @@ func init() { + var statusOK = status.New(codes.OK, "") + var logger = grpclog.Component("core") + +-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) ++type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + + // MethodDesc represents an RPC service's method specification. + type MethodDesc struct { +@@ -99,20 +99,20 @@ type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. +- HandlerType interface{} ++ HandlerType any + Methods []MethodDesc + Streams []StreamDesc +- Metadata interface{} ++ Metadata any + } + + // serviceInfo wraps information about a service. It is very similar to + // ServiceDesc and is constructed from it for internal purposes. + type serviceInfo struct { + // Contains the implementation for the methods in this service. +- serviceImpl interface{} ++ serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc +- mdata interface{} ++ mdata any + } + + // Server is a gRPC server to serve RPC requests. 
+@@ -164,10 +164,12 @@ type serverOptions struct { + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int ++ sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 ++ recvBufferPool SharedBufferPool + } + + var defaultServerOptions = serverOptions{ +@@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{ + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, ++ recvBufferPool: nopBufferPool{}, + } + var globalServerOptions []ServerOption + +@@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} + } + ++// SharedWriteBuffer allows reusing per-connection transport write buffer. ++// If this option is set to true every connection will release the buffer after ++// flushing the data on the wire. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func SharedWriteBuffer(val bool) ServerOption { ++ return newFuncServerOption(func(o *serverOptions) { ++ o.sharedWriteBuffer = val ++ }) ++} ++ + // WriteBufferSize determines how much data can be batched before doing a write + // on the wire. The corresponding memory allocation for this buffer will be + // twice the size to keep syscalls low. The default value for this buffer is +@@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { + + // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. + func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { +- if kp.Time > 0 && kp.Time < time.Second { ++ if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") +- kp.Time = time.Second ++ kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { +@@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { + }) + } + ++// RecvBufferPool returns a ServerOption that configures the server ++// to use the provided shared buffer pool for parsing incoming messages. Depending ++// on the application's workload, this could result in reduced memory allocation. ++// ++// If you are unsure about how to implement a memory pool but want to utilize one, ++// begin with grpc.NewSharedBufferPool. ++// ++// Note: The shared buffer pool feature will not be active if any of the following ++// options are used: StatsHandler, EnableTracing, or binary logging. In such ++// cases, the shared buffer pool will be ignored. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { ++ return newFuncServerOption(func(o *serverOptions) { ++ o.recvBufferPool = bufferPool ++ }) ++} ++ + // serverWorkerResetThreshold defines how often the stack must be reset. Every + // N requests, by spawning a new goroutine in its place, a worker can reset its + // stack so that large stacks don't live in memory forever. 2^16 should allow +@@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { + + // printf records an event in s's event log, unless s has been stopped. + // REQUIRES s.mu is held. 
+-func (s *Server) printf(format string, a ...interface{}) { ++func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +@@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { + + // errorf records an error in s's event log, unless s has been stopped. + // REQUIRES s.mu is held. +-func (s *Server) errorf(format string, a ...interface{}) { ++func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +@@ -648,14 +686,14 @@ type ServiceRegistrar interface { + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. +- RegisterService(desc *ServiceDesc, impl interface{}) ++ RegisterService(desc *ServiceDesc, impl any) + } + + // RegisterService registers a service and its implementation to the gRPC + // server. It is called from the IDL generated code. This must be called before + // invoking Serve. If ss is non-nil (for legacy code), its type is checked to + // ensure it implements sd.HandlerType. +-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ++func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) +@@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + s.register(sd, ss) + } + +-func (s *Server) register(sd *ServiceDesc, ss interface{}) { ++func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) +@@ -707,7 +745,7 @@ type MethodInfo struct { + type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. +- Metadata interface{} ++ Metadata any + } + + // GetServiceInfo returns a map from service names to ServiceInfo. +@@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, ++ SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, +@@ -944,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + f := func() { + defer streamQuota.release() + defer wg.Done() +- s.handleStream(st, stream, s.traceInfo(st, stream)) ++ s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { +@@ -956,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + } + } + go f() +- }, func(ctx context.Context, method string) context.Context { +- if !EnableTracing { +- return ctx +- } +- tr := trace.New("grpc.Recv."+methodFamily(method), method) +- return trace.NewContext(ctx, tr) + }) + wg.Wait() + } +@@ -1010,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.serveStreams(st) + } + +-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +-// If tracing is not enabled, it returns nil. 
+-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { +- if !EnableTracing { +- return nil +- } +- tr, ok := trace.FromContext(stream.Context()) +- if !ok { +- return nil +- } +- +- trInfo = &traceInfo{ +- tr: tr, +- firstLine: firstLine{ +- client: false, +- remoteAddr: st.RemoteAddr(), +- }, +- } +- if dl, ok := stream.Context().Deadline(); ok { +- trInfo.firstLine.deadline = time.Until(dl) +- } +- return trInfo +-} +- + func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() +@@ -1094,7 +1103,7 @@ func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) + } + +-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { ++func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) +@@ -1113,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { +- sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) ++ sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + } + } + return err +@@ -1141,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) { + } + + func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { +- return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { ++ return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } +@@ -1150,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info + if curr == len(interceptors)-1 { + return finalHandler + } +- return func(ctx context.Context, req interface{}) (interface{}, error) { ++ return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } + } + +-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { +@@ -1169,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + IsClientStream: false, + IsServerStream: false, + } +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) +@@ -1187,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() +@@ -1201,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { +@@ -1223,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + } + if len(binlogs) != 0 { +- ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, +@@ -1294,7 +1302,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } +- d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) ++ d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) +@@ -1304,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if channelz.IsOn() { + t.IncrMsgRecv() + } +- df := func(v interface{}) error { ++ df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), &stats.InPayload{ ++ sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: len(d), +@@ -1323,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Message: d, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), cm) ++ binlog.Log(ctx, cm) + } + } + if trInfo != nil { +@@ -1331,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + return nil + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) +@@ -1356,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Header: h, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) ++ binlog.Log(ctx, sh) + } + } + st := &binarylog.ServerTrailer{ +@@ -1364,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return appErr +@@ -1379,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } +- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { ++ if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err +@@ -1406,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, sh) ++ binlog.Log(ctx, st) + } + } + return err +@@ -1421,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Message: reply, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) +- binlog.Log(stream.Context(), sm) ++ binlog.Log(ctx, sh) ++ binlog.Log(ctx, sm) + } + } + if channelz.IsOn() { +@@ -1440,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return t.WriteStatus(stream, statusOK) +@@ -1468,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) { + } + + func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { +- return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { ++ return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } +@@ -1477,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf + if curr == len(interceptors)-1 { + return finalHandler + } +- return func(srv interface{}, stream ServerStream) error { ++ return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } + } + +-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } +@@ -1496,15 +1504,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, +- p: &parser{r: stream}, ++ p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, +@@ -1518,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() +@@ -1535,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + end.Error = toRPCErr(err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + } + +@@ -1577,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + 
logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), logEntry) ++ binlog.Log(ctx, logEntry) + } + } + +@@ -1621,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error +- var server interface{} ++ var server any + if info != nil { + server = info.serviceImpl + } +@@ -1655,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + Err: appErr, + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + t.WriteStatus(ss.s, appStatus) +@@ -1673,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + Err: appErr, + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return t.WriteStatus(ss.s, statusOK) + } + +-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { ++func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ++ ctx := stream.Context() ++ var ti *traceInfo ++ if EnableTracing { ++ tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) ++ ctx = trace.NewContext(ctx, tr) ++ ti = &traceInfo{ ++ tr: tr, ++ firstLine: firstLine{ ++ client: false, ++ remoteAddr: t.RemoteAddr(), ++ }, ++ } ++ if dl, ok := ctx.Deadline(); ok { ++ ti.firstLine.deadline = time.Until(dl) ++ } ++ } ++ + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) ++ ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + return + } +@@ -1709,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { +- s.processUnaryRPC(t, stream, srv, md, trInfo) ++ s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { +- s.processStreamingRPC(t, stream, srv, sd, trInfo) ++ s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { +- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) ++ s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string +@@ -1728,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } +- if trInfo != nil { +- trInfo.tr.LazyPrintf("%s", errDesc) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyPrintf("%s", errDesc) ++ ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + } + +@@ -2054,12 +2079,12 @@ func validateSendCompressor(name, clientCompressors string) error { + // atomicSemaphore implements a blocking, counting semaphore. acquire should be + // called synchronously; release may be called asynchronously. + type atomicSemaphore struct { +- n int64 ++ n atomic.Int64 + wait chan struct{} + } + + func (q *atomicSemaphore) acquire() { +- if atomic.AddInt64(&q.n, -1) < 0 { ++ if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +@@ -2070,12 +2095,14 @@ func (q *atomicSemaphore) release() { + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. +- if atomic.AddInt64(&q.n, 1) <= 0 { ++ if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } + } + + func newHandlerQuota(n uint32) *atomicSemaphore { +- return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} ++ a := &atomicSemaphore{wait: make(chan struct{}, 1)} ++ a.n.Store(int64(n)) ++ return a + } +diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go +new file mode 100644 +index 00000000000..48a64cfe8e2 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go +@@ -0,0 +1,154 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpc ++ ++import "sync" ++ ++// SharedBufferPool is a pool of buffers that can be shared, resulting in ++// decreased memory allocation. Currently, in gRPC-go, it is only utilized ++// for parsing incoming messages. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. 
++type SharedBufferPool interface { ++ // Get returns a buffer with specified length from the pool. ++ // ++ // The returned byte slice may be not zero initialized. ++ Get(length int) []byte ++ ++ // Put returns a buffer to the pool. ++ Put(*[]byte) ++} ++ ++// NewSharedBufferPool creates a simple SharedBufferPool with buckets ++// of different sizes to optimize memory usage. This prevents the pool from ++// wasting large amounts of memory, even when handling messages of varying sizes. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func NewSharedBufferPool() SharedBufferPool { ++ return &simpleSharedBufferPool{ ++ pools: [poolArraySize]simpleSharedBufferChildPool{ ++ newBytesPool(level0PoolMaxSize), ++ newBytesPool(level1PoolMaxSize), ++ newBytesPool(level2PoolMaxSize), ++ newBytesPool(level3PoolMaxSize), ++ newBytesPool(level4PoolMaxSize), ++ newBytesPool(0), ++ }, ++ } ++} ++ ++// simpleSharedBufferPool is a simple implementation of SharedBufferPool. ++type simpleSharedBufferPool struct { ++ pools [poolArraySize]simpleSharedBufferChildPool ++} ++ ++func (p *simpleSharedBufferPool) Get(size int) []byte { ++ return p.pools[p.poolIdx(size)].Get(size) ++} ++ ++func (p *simpleSharedBufferPool) Put(bs *[]byte) { ++ p.pools[p.poolIdx(cap(*bs))].Put(bs) ++} ++ ++func (p *simpleSharedBufferPool) poolIdx(size int) int { ++ switch { ++ case size <= level0PoolMaxSize: ++ return level0PoolIdx ++ case size <= level1PoolMaxSize: ++ return level1PoolIdx ++ case size <= level2PoolMaxSize: ++ return level2PoolIdx ++ case size <= level3PoolMaxSize: ++ return level3PoolIdx ++ case size <= level4PoolMaxSize: ++ return level4PoolIdx ++ default: ++ return levelMaxPoolIdx ++ } ++} ++ ++const ( ++ level0PoolMaxSize = 16 // 16 B ++ level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B ++ level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB ++ level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB ++ level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB ++) ++ ++const ( ++ level0PoolIdx = iota ++ level1PoolIdx ++ level2PoolIdx ++ level3PoolIdx ++ level4PoolIdx ++ levelMaxPoolIdx ++ poolArraySize ++) ++ ++type simpleSharedBufferChildPool interface { ++ Get(size int) []byte ++ Put(any) ++} ++ ++type bufferPool struct { ++ sync.Pool ++ ++ defaultSize int ++} ++ ++func (p *bufferPool) Get(size int) []byte { ++ bs := p.Pool.Get().(*[]byte) ++ ++ if cap(*bs) < size { ++ p.Pool.Put(bs) ++ ++ return make([]byte, size) ++ } ++ ++ return (*bs)[:size] ++} ++ ++func newBytesPool(size int) simpleSharedBufferChildPool { ++ return &bufferPool{ ++ Pool: sync.Pool{ ++ New: func() any { ++ bs := make([]byte, size) ++ return &bs ++ }, ++ }, ++ defaultSize: size, ++ } ++} ++ ++// nopBufferPool is a buffer pool just makes new buffer without pooling. ++type nopBufferPool struct { ++} ++ ++func (nopBufferPool) Get(length int) []byte { ++ return make([]byte, length) ++} ++ ++func (nopBufferPool) Put(*[]byte) { ++} +diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go +index 7a552a9b787..4ab70e2d462 100644 +--- a/vendor/google.golang.org/grpc/stats/stats.go ++++ b/vendor/google.golang.org/grpc/stats/stats.go +@@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } + + func (s *Begin) isRPCStats() {} + ++// PickerUpdated indicates that the LB policy provided a new picker while the ++// RPC was waiting for one. 
++type PickerUpdated struct{} ++ ++// IsClient indicates if the stats information is from client side. Only Client ++// Side interfaces with a Picker, thus always returns true. ++func (*PickerUpdated) IsClient() bool { return true } ++ ++func (*PickerUpdated) isRPCStats() {} ++ + // InPayload contains the information for an incoming payload. + type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. +- Payload interface{} ++ Payload any + // Data is the serialized message payload. + Data []byte + +@@ -134,7 +144,7 @@ type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. +- Payload interface{} ++ Payload any + // Data is the serialized message payload. + Data []byte + // Length is the size of the uncompressed payload data. Does not include any +diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go +index bcf2e4d81be..a93360efb84 100644 +--- a/vendor/google.golang.org/grpc/status/status.go ++++ b/vendor/google.golang.org/grpc/status/status.go +@@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { + } + + // Newf returns New(c, fmt.Sprintf(format, a...)). +-func Newf(c codes.Code, format string, a ...interface{}) *Status { ++func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) + } + +@@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { + } + + // Errorf returns Error(c, fmt.Sprintf(format, a...)). +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return Error(c, fmt.Sprintf(format, a...)) + } + +@@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { + } + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { +- if gs.GRPCStatus() == nil { ++ grpcStatus := gs.GRPCStatus() ++ if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } +- return gs.GRPCStatus(), true ++ return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { +- if gs.GRPCStatus() == nil { ++ grpcStatus := gs.GRPCStatus() ++ if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } +- p := gs.GRPCStatus().Proto() ++ p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true + } +diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go +index 10092685b22..b14b2fbea2e 100644 +--- a/vendor/google.golang.org/grpc/stream.go ++++ b/vendor/google.golang.org/grpc/stream.go +@@ -31,6 +31,7 @@ import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" ++ "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" +@@ -54,7 +55,7 @@ import ( + // status package, or be one of the context errors. 
Otherwise, gRPC will use + // codes.Unknown as the status code and err.Error() as the status message of the + // RPC. +-type StreamHandler func(srv interface{}, stream ServerStream) error ++type StreamHandler func(srv any, stream ServerStream) error + + // StreamDesc represents a streaming RPC service's method specification. Used + // on the server when registering services and on the client when initiating +@@ -79,9 +80,9 @@ type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // ClientStream defines the client-side behavior of a streaming RPC. +@@ -90,7 +91,9 @@ type Stream interface { + // status package. + type ClientStream interface { + // Header returns the header metadata received from the server if there +- // is any. It blocks if the metadata is not ready to read. ++ // is any. It blocks if the metadata is not ready to read. If the metadata ++ // is nil and the error is also nil, then the stream was terminated without ++ // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or +@@ -126,7 +129,7 @@ type ClientStream interface { + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC +@@ -135,7 +138,7 @@ type ClientStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // NewStream creates a new Stream for the client side. This is typically +@@ -155,11 +158,6 @@ type ClientStream interface { + // If none of the above happen, a goroutine and a context will be leaked, and grpc + // will not call the optionally-configured stats handler with a stats.End message. + func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { +- if err := cc.idlenessMgr.onCallBegin(); err != nil { +- return nil, err +- } +- defer cc.idlenessMgr.onCallEnd() +- + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +@@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth + } + + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { ++ // Start tracking the RPC for idleness purposes. This is where a stream is ++ // created for both streaming and unary RPCs, and hence is a good place to ++ // track active RPC count. 
++ if err := cc.idlenessMgr.OnCallBegin(); err != nil { ++ return nil, err ++ } ++ // Add a calloption, to decrement the active call count, that gets executed ++ // when the RPC completes. ++ opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) ++ + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { +@@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) + ctx = trace.NewContext(ctx, trInfo.tr) + } + +- if cs.cc.parsedTarget.URL.Scheme == "xds" { ++ if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( +@@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error { + return toRPCErr(nse.Err) + } + a.s = s +- a.p = &parser{r: s} ++ a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + return nil + } + +@@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) + + func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD +- noHeader := false + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() +- if err == transport.ErrNoHeaders { +- noHeader = true +- return nil +- } + return toRPCErr(err) + }, cs.commitAttemptLocked) + ++ if m == nil && err == nil { ++ // The stream ended with success. Finish the clientStream. ++ err = io.EOF ++ } ++ + if err != nil { + cs.finish(err) +- return nil, err ++ // Do not return the error. The user should get it by calling Recv(). ++ return nil, nil + } + +- if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { ++ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. + logEntry := &binarylog.ServerHeader{ +@@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { + binlog.Log(cs.ctx, logEntry) + } + } ++ + return m, nil + } + +@@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error + cs.buffer = append(cs.buffer, op) + } + +-func (cs *clientStream) SendMsg(m interface{}) (err error) { ++func (cs *clientStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg +@@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { + return err + } + +-func (cs *clientStream) RecvMsg(m interface{}) error { ++func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() +@@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) +- +- if len(cs.binlogs) != 0 { +- // finish will not log Trailer. Log Trailer here. 
+- logEntry := &binarylog.ServerTrailer{ +- OnClientSide: true, +- Trailer: cs.Trailer(), +- Err: err, +- } +- if logEntry.Err == io.EOF { +- logEntry.Err = nil +- } +- if peer, ok := peer.FromContext(cs.Context()); ok { +- logEntry.PeerAddr = peer.Addr +- } +- for _, binlog := range cs.binlogs { +- binlog.Log(cs.ctx, logEntry) +- } +- } + } + return err + } +@@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { + } + } + } ++ + cs.mu.Unlock() +- // For binary logging. only log cancel in finish (could be caused by RPC ctx +- // canceled or ClientConn closed). Trailer will be logged in RecvMsg. +- // +- // Only one of cancel or trailer needs to be logged. In the cases where +- // users don't call RecvMsg, users must have already canceled the RPC. +- if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { +- c := &binarylog.Cancel{ +- OnClientSide: true, +- } +- for _, binlog := range cs.binlogs { +- binlog.Log(cs.ctx, c) ++ // Only one of cancel or trailer needs to be logged. ++ if len(cs.binlogs) != 0 { ++ switch err { ++ case errContextCanceled, errContextDeadline, ErrClientConnClosing: ++ c := &binarylog.Cancel{ ++ OnClientSide: true, ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, c) ++ } ++ default: ++ logEntry := &binarylog.ServerTrailer{ ++ OnClientSide: true, ++ Trailer: cs.Trailer(), ++ Err: err, ++ } ++ if peer, ok := peer.FromContext(cs.Context()); ok { ++ logEntry.PeerAddr = peer.Addr ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, logEntry) ++ } + } + } + if err == nil { +@@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { + cs.cancel() + } + +-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { ++func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() +@@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + return nil + } + +-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { ++func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} +@@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin + return nil, err + } + as.s = s +- as.p = &parser{r: s} ++ as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is +@@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { + return as.s.Context() + } + +-func (as *addrConnStream) SendMsg(m interface{}) (err error) { ++func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg +@@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { + return nil + } + +-func (as *addrConnStream) RecvMsg(m interface{}) (err error) { ++func (as *addrConnStream) RecvMsg(m any) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. +@@ -1512,7 +1516,7 @@ type ServerStream interface { + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. 
+- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the +@@ -1521,7 +1525,7 @@ type ServerStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // serverStream implements a server side Stream. +@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { + ss.s.SetTrailer(md) + } + +-func (ss *serverStream) SendMsg(m interface{}) (err error) { ++func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() +@@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } +@@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + return nil + } + +-func (ss *serverStream) RecvMsg(m interface{}) (err error) { ++func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() +@@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } +@@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { + // prepareMsg returns the hdr, payload and data + // using the compressors passed or using the + // passed preparedmsg +-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { ++func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } +diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go +index bfa5dfa40e4..07f01257688 100644 +--- a/vendor/google.golang.org/grpc/tap/tap.go ++++ b/vendor/google.golang.org/grpc/tap/tap.go +@@ -27,6 +27,8 @@ package tap + + import ( + "context" ++ ++ "google.golang.org/grpc/metadata" + ) + + // Info defines the relevant information needed by the handles. +@@ -34,6 +36,10 @@ type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string ++ ++ // Header contains the header metadata received. ++ Header metadata.MD ++ + // TODO: More to be added. + } + +diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go +index 07a2d26b3e7..9ded79321ba 100644 +--- a/vendor/google.golang.org/grpc/trace.go ++++ b/vendor/google.golang.org/grpc/trace.go +@@ -97,8 +97,8 @@ func truncate(x string, l int) string { + + // payload represents an RPC request or response payload. 
+ type payload struct { +- sent bool // whether this is an outgoing payload +- msg interface{} // e.g. a proto.Message ++ sent bool // whether this is an outgoing payload ++ msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? + } + +@@ -111,7 +111,7 @@ func (p payload) String() string { + + type fmtStringer struct { + format string +- a []interface{} ++ a []any + } + + func (f *fmtStringer) String() string { +diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go +index 3cc75406218..6d2cadd79a9 100644 +--- a/vendor/google.golang.org/grpc/version.go ++++ b/vendor/google.golang.org/grpc/version.go +@@ -19,4 +19,4 @@ + package grpc + + // Version is the current grpc version. +-const Version = "1.56.3" ++const Version = "1.59.0" +diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh +index a8e4732b3d2..bb480f1f9cc 100644 +--- a/vendor/google.golang.org/grpc/vet.sh ++++ b/vendor/google.golang.org/grpc/vet.sh +@@ -84,12 +84,18 @@ not git grep -l 'x/net/context' -- "*.go" + # thread safety. + git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + ++# - Do not use "interface{}"; use "any" instead. ++git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' ++ + # - Do not call grpclog directly. Use grpclog.Component instead. + git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + + # - Ensure all ptypes proto packages are renamed when importing. + not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + ++# - Ensure all usages of grpc_testing package are renamed when importing. ++not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" ++ + # - Ensure all xds proto imports are renamed to *pb or *grpc. + git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +@@ -106,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + +- go mod tidy -compat=1.17 ++ go mod tidy -compat=1.19 + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +@@ -168,8 +174,6 @@ proto.RegisteredExtension is deprecated + proto.RegisteredExtensions is deprecated + proto.RegisterMapType is deprecated + proto.Unmarshaler is deprecated +-resolver.Backend +-resolver.GRPCLB + Target is deprecated: Use the Target field in the BuildOptions instead. 
+ xxx_messageInfo_ + ' "${SC_OUT}" +diff --git a/vendor/modules.txt b/vendor/modules.txt +index c9c3d3168d4..988c0f0e9d4 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -1,7 +1,7 @@ + # bitbucket.org/bertimus9/systemstat v0.5.0 + ## explicit; go 1.17 + bitbucket.org/bertimus9/systemstat +-# cloud.google.com/go/compute v1.19.1 ++# cloud.google.com/go/compute v1.23.0 + ## explicit; go 1.19 + cloud.google.com/go/compute/internal + # cloud.google.com/go/compute/metadata v0.2.3 +@@ -166,8 +166,8 @@ github.com/beorn7/perks/quantile + # github.com/blang/semver/v4 v4.0.0 + ## explicit; go 1.14 + github.com/blang/semver/v4 +-# github.com/cenkalti/backoff/v4 v4.1.3 +-## explicit; go 1.13 ++# github.com/cenkalti/backoff/v4 v4.2.1 ++## explicit; go 1.18 + github.com/cenkalti/backoff/v4 + # github.com/cespare/xxhash/v2 v2.2.0 + ## explicit; go 1.11 +@@ -278,8 +278,8 @@ github.com/fvbommel/sortorder + # github.com/go-errors/errors v1.0.1 + ## explicit + github.com/go-errors/errors +-# github.com/go-logr/logr v1.2.3 +-## explicit; go 1.16 ++# github.com/go-logr/logr v1.3.0 ++## explicit; go 1.18 + github.com/go-logr/logr + github.com/go-logr/logr/funcr + # github.com/go-logr/stdr v1.2.2 +@@ -438,7 +438,7 @@ github.com/google/gnostic/extensions + github.com/google/gnostic/jsonschema + github.com/google/gnostic/openapiv2 + github.com/google/gnostic/openapiv3 +-# github.com/google/go-cmp v0.5.9 ++# github.com/google/go-cmp v0.6.0 + ## explicit; go 1.13 + github.com/google/go-cmp/cmp + github.com/google/go-cmp/cmp/cmpopts +@@ -452,17 +452,39 @@ github.com/google/gofuzz + # github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 + ## explicit; go 1.14 + github.com/google/pprof/profile ++# github.com/google/s2a-go v0.1.4 ++## explicit; go 1.16 ++github.com/google/s2a-go ++github.com/google/s2a-go/fallback ++github.com/google/s2a-go/internal/authinfo ++github.com/google/s2a-go/internal/handshaker ++github.com/google/s2a-go/internal/handshaker/service ++github.com/google/s2a-go/internal/proto/common_go_proto ++github.com/google/s2a-go/internal/proto/s2a_context_go_proto ++github.com/google/s2a-go/internal/proto/s2a_go_proto ++github.com/google/s2a-go/internal/proto/v2/common_go_proto ++github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto ++github.com/google/s2a-go/internal/proto/v2/s2a_go_proto ++github.com/google/s2a-go/internal/record ++github.com/google/s2a-go/internal/record/internal/aeadcrypter ++github.com/google/s2a-go/internal/record/internal/halfconn ++github.com/google/s2a-go/internal/tokenmanager ++github.com/google/s2a-go/internal/v2 ++github.com/google/s2a-go/internal/v2/certverifier ++github.com/google/s2a-go/internal/v2/remotesigner ++github.com/google/s2a-go/internal/v2/tlsconfigstore ++github.com/google/s2a-go/stream + # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 + ## explicit; go 1.13 + github.com/google/shlex +-# github.com/google/uuid v1.3.0 ++# github.com/google/uuid v1.3.1 + ## explicit + github.com/google/uuid + # github.com/googleapis/enterprise-certificate-proxy v0.2.3 + ## explicit; go 1.19 + github.com/googleapis/enterprise-certificate-proxy/client + github.com/googleapis/enterprise-certificate-proxy/client/util +-# github.com/googleapis/gax-go/v2 v2.7.1 ++# github.com/googleapis/gax-go/v2 v2.11.0 + ## explicit; go 1.19 + github.com/googleapis/gax-go/v2 + github.com/googleapis/gax-go/v2/apierror +@@ -485,8 +507,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus + github.com/grpc-ecosystem/grpc-gateway/internal + 
github.com/grpc-ecosystem/grpc-gateway/runtime + github.com/grpc-ecosystem/grpc-gateway/utilities +-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 +-## explicit; go 1.14 ++# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 ++## explicit; go 1.17 + github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule + github.com/grpc-ecosystem/grpc-gateway/v2/runtime + github.com/grpc-ecosystem/grpc-gateway/v2/utilities +@@ -698,8 +720,6 @@ github.com/prometheus/procfs/internal/util + # github.com/robfig/cron/v3 v3.0.1 + ## explicit; go 1.12 + github.com/robfig/cron/v3 +-# github.com/rogpeppe/go-internal v1.12.0 +-## explicit; go 1.20 + # github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 + ## explicit + github.com/rubiojr/go-vhd/vhd +@@ -728,8 +748,8 @@ github.com/stoewer/go-strcase + # github.com/stretchr/objx v0.5.0 + ## explicit; go 1.12 + github.com/stretchr/objx +-# github.com/stretchr/testify v1.8.1 +-## explicit; go 1.13 ++# github.com/stretchr/testify v1.8.4 ++## explicit; go 1.20 + github.com/stretchr/testify/assert + github.com/stretchr/testify/mock + github.com/stretchr/testify/require +@@ -920,63 +940,61 @@ go.opencensus.io/trace/tracestate + # go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 + ## explicit; go 1.17 + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful +-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++## explicit; go 1.20 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +-## explicit; go 1.17 ++# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++## explicit; go 1.19 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +-# go.opentelemetry.io/otel v1.10.0 +-## explicit; go 1.17 ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil ++# go.opentelemetry.io/otel v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel + go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/baggage + go.opentelemetry.io/otel/codes +-go.opentelemetry.io/otel/exporters/otlp/internal +-go.opentelemetry.io/otel/exporters/otlp/internal/envconfig + go.opentelemetry.io/otel/internal ++go.opentelemetry.io/otel/internal/attribute + go.opentelemetry.io/otel/internal/baggage + go.opentelemetry.io/otel/internal/global + go.opentelemetry.io/otel/propagation + go.opentelemetry.io/otel/semconv/internal + go.opentelemetry.io/otel/semconv/v1.12.0 ++go.opentelemetry.io/otel/semconv/v1.17.0 ++go.opentelemetry.io/otel/semconv/v1.21.0 + go.opentelemetry.io/otel/semconv/v1.4.0 +-# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 +-## explicit; go 1.17 +-go.opentelemetry.io/otel/exporters/otlp/internal/retry +-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/exporters/otlp/otlptrace +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig + go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform +-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +-## explicit; go 1.17 ++# 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc +-# go.opentelemetry.io/otel/metric v0.31.0 +-## explicit; go 1.17 ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry ++# go.opentelemetry.io/otel/metric v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/metric +-go.opentelemetry.io/otel/metric/global +-go.opentelemetry.io/otel/metric/instrument +-go.opentelemetry.io/otel/metric/instrument/asyncfloat64 +-go.opentelemetry.io/otel/metric/instrument/asyncint64 +-go.opentelemetry.io/otel/metric/instrument/syncfloat64 +-go.opentelemetry.io/otel/metric/instrument/syncint64 +-go.opentelemetry.io/otel/metric/internal/global +-go.opentelemetry.io/otel/metric/unit +-# go.opentelemetry.io/otel/sdk v1.10.0 +-## explicit; go 1.17 ++go.opentelemetry.io/otel/metric/embedded ++# go.opentelemetry.io/otel/sdk v1.20.0 ++## explicit; go 1.20 ++go.opentelemetry.io/otel/sdk + go.opentelemetry.io/otel/sdk/instrumentation + go.opentelemetry.io/otel/sdk/internal + go.opentelemetry.io/otel/sdk/internal/env + go.opentelemetry.io/otel/sdk/resource + go.opentelemetry.io/otel/sdk/trace + go.opentelemetry.io/otel/sdk/trace/tracetest +-# go.opentelemetry.io/otel/trace v1.10.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/otel/trace v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/trace +-# go.opentelemetry.io/proto/otlp v0.19.0 +-## explicit; go 1.14 ++go.opentelemetry.io/otel/trace/embedded ++go.opentelemetry.io/otel/trace/noop ++# go.opentelemetry.io/proto/otlp v1.0.0 ++## explicit; go 1.17 + go.opentelemetry.io/proto/otlp/collector/trace/v1 + go.opentelemetry.io/proto/otlp/common/v1 + go.opentelemetry.io/proto/otlp/resource/v1 +@@ -1011,11 +1029,13 @@ go.uber.org/zap/zaptest + golang.org/x/crypto/bcrypt + golang.org/x/crypto/blowfish + golang.org/x/crypto/chacha20 ++golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/cryptobyte + golang.org/x/crypto/cryptobyte/asn1 + golang.org/x/crypto/curve25519 + golang.org/x/crypto/curve25519/internal/field + golang.org/x/crypto/ed25519 ++golang.org/x/crypto/hkdf + golang.org/x/crypto/internal/alias + golang.org/x/crypto/internal/poly1305 + golang.org/x/crypto/nacl/secretbox +@@ -1046,8 +1066,8 @@ golang.org/x/net/internal/timeseries + golang.org/x/net/proxy + golang.org/x/net/trace + golang.org/x/net/websocket +-# golang.org/x/oauth2 v0.7.0 +-## explicit; go 1.17 ++# golang.org/x/oauth2 v0.11.0 ++## explicit; go 1.18 + golang.org/x/oauth2 + golang.org/x/oauth2/authhandler + golang.org/x/oauth2/google +@@ -1123,7 +1143,7 @@ golang.org/x/tools/internal/pkgbits + golang.org/x/tools/internal/tokeninternal + golang.org/x/tools/internal/typeparams + golang.org/x/tools/internal/typesinternal +-# google.golang.org/api v0.114.0 ++# google.golang.org/api v0.126.0 + ## explicit; go 1.19 + google.golang.org/api/compute/v0.alpha + google.golang.org/api/compute/v0.beta +@@ -1152,24 +1172,27 @@ google.golang.org/appengine/internal/datastore + google.golang.org/appengine/internal/log + google.golang.org/appengine/internal/modules + google.golang.org/appengine/internal/remote_api ++google.golang.org/appengine/internal/socket + google.golang.org/appengine/internal/urlfetch 
++google.golang.org/appengine/socket + google.golang.org/appengine/urlfetch +-# google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 ++# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 ++google.golang.org/genproto/internal + google.golang.org/genproto/protobuf/field_mask +-# google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a ++# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 + google.golang.org/genproto/googleapis/api + google.golang.org/genproto/googleapis/api/annotations + google.golang.org/genproto/googleapis/api/expr/v1alpha1 + google.golang.org/genproto/googleapis/api/httpbody +-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 ++# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 + google.golang.org/genproto/googleapis/rpc/code + google.golang.org/genproto/googleapis/rpc/errdetails + google.golang.org/genproto/googleapis/rpc/status +-# google.golang.org/grpc v1.56.3 +-## explicit; go 1.17 ++# google.golang.org/grpc v1.59.0 ++## explicit; go 1.19 + google.golang.org/grpc + google.golang.org/grpc/attributes + google.golang.org/grpc/backoff +@@ -1202,6 +1225,7 @@ google.golang.org/grpc/internal/grpclog + google.golang.org/grpc/internal/grpcrand + google.golang.org/grpc/internal/grpcsync + google.golang.org/grpc/internal/grpcutil ++google.golang.org/grpc/internal/idle + google.golang.org/grpc/internal/metadata + google.golang.org/grpc/internal/pretty + google.golang.org/grpc/internal/resolver diff --git a/projects/kubernetes/kubernetes/1-26/patches/0018-skip-x509-tests.patch b/projects/kubernetes/kubernetes/1-26/patches/0018-skip-x509-tests.patch new file mode 100644 index 0000000000..fd8ac4b5c8 --- /dev/null +++ b/projects/kubernetes/kubernetes/1-26/patches/0018-skip-x509-tests.patch @@ -0,0 +1,26 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Vineeth Bandi +Date: Tue, 14 Jan 2025 11:52:26 -0600 +Subject: [PATCH] skip x509 tests + +Signed-off-by: Vineeth Bandi +--- + hack/make-rules/test.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh +index e9074678a8f..c78fc9b3b60 100755 +--- a/hack/make-rules/test.sh ++++ b/hack/make-rules/test.sh +@@ -54,7 +54,7 @@ kube::test::find_dirs() { + \) -prune \ + \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u + +- find ./staging -name '*_test.go' -not -path '*/test/integration/*' -prune -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u ++ find ./staging -name '*_test.go' -not -path '*/test/integration/*' -not -path '*/apiserver/pkg/authentication/request/x509/*' -prune -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u + ) + } + +-- +2.46.0 + diff --git a/projects/kubernetes/kubernetes/1-27/CHECKSUMS b/projects/kubernetes/kubernetes/1-27/CHECKSUMS index 614b91e4cb..21baa7aae5 100644 --- a/projects/kubernetes/kubernetes/1-27/CHECKSUMS +++ b/projects/kubernetes/kubernetes/1-27/CHECKSUMS @@ -1,19 +1,19 @@ -22c3304138f057c3b7a7fbd79680ec90c38f11d11ba8c77f0c7a9e1151997e4e _output/1-27/bin/darwin/amd64/kubectl -1928b0d3786b5c538dea4dfd4ce2b25f42ab6f0b073beeef0262d6a731f3339c _output/1-27/bin/linux/amd64/kube-apiserver -c377b8202129972a2ad86183298968e0912cd87d51005e64d242e1630bcbc00b 
_output/1-27/bin/linux/amd64/kube-controller-manager -2ee5c8ad44696b14b927fff0991b4ca48ac0348417977c3c1a06b86650177313 _output/1-27/bin/linux/amd64/kube-proxy -ed9ecb8aec3f0c5f7d07a46a4ea6cfd030e2fb63fad0f6fd2daa3695a6795433 _output/1-27/bin/linux/amd64/kube-scheduler -ce48cd91da4545c95269d0e025fb7c7733f1070de0b5681a35056dc8a08495fa _output/1-27/bin/linux/amd64/kubeadm -a5fedb37b53c5998d0e17b105873a0ec94c6d16e9afa54c1a3629028ff2b9e7a _output/1-27/bin/linux/amd64/kubectl -456d434eaff4a738e365f9c10a8b2c56479b5d1a45f42365e4420cbc09fee167 _output/1-27/bin/linux/amd64/kubelet -841e60c91509e727a7083a6b30373f82e796810357ae47caeabe974dab640e07 _output/1-27/bin/linux/arm64/kube-apiserver -544415b94afdb2f795f8ae50d0693e96283aac8f13605d546776c1ef96a2e376 _output/1-27/bin/linux/arm64/kube-controller-manager -af37353e95941d6a67ecce584860387d9b33dfb64e09cd6fd584ec778646944b _output/1-27/bin/linux/arm64/kube-proxy -da1b2e559ca26f2ed47da541ee268c9220af06d2a444bdfefa5f523934005f33 _output/1-27/bin/linux/arm64/kube-scheduler -54d638c9d202c6aadbe7440f5248dc0a2d10022459d58c925639810a62e73047 _output/1-27/bin/linux/arm64/kubeadm -7fca5af29d0dd043d47c1e174a61c241ca025d9fdc06eee59566e9c0da50d27e _output/1-27/bin/linux/arm64/kubectl -a220a95bfe82de93eca90fba9528e6ad7aa30882974557d6d12b9edafd86a61a _output/1-27/bin/linux/arm64/kubelet -6a5b2d75036ace058e4792674653aaf9c6ffe7be56b0142ef1c83b3169415d0b _output/1-27/bin/windows/amd64/kube-proxy.exe -19db1c82b519254f0aa13e79d9195bd1e6e2454661af38b18919f2877766381a _output/1-27/bin/windows/amd64/kubeadm.exe -17fdbbfc50921cec362222ae1fa12ced0af088d7bbb8d27eda5d51e20d9e5b56 _output/1-27/bin/windows/amd64/kubectl.exe -800afc3ab5330cefdda607ab2adb28b50fe5d7521570292a1d832485be86354e _output/1-27/bin/windows/amd64/kubelet.exe \ No newline at end of file +b61e0a85834568c993f6ab2a0a6b7393ab0c22ec4f45032c68d7a4281f79d3a8 _output/1-27/bin/darwin/amd64/kubectl +dbf8c171904ad5d6e013b3b81d814111cebca9a18fa14045d7f00f295b5f8759 _output/1-27/bin/linux/amd64/kube-apiserver +9a21e73991f01b3d6205987a8106e81dd882af48ee77f7938d3ebe61d80df294 _output/1-27/bin/linux/amd64/kube-controller-manager +4e48eebb421f520a4a1274e7d351226da1d31092397c61c5dbcf782fe97e05cf _output/1-27/bin/linux/amd64/kube-proxy +e9fb508f3fa4a436c371d56210151b644c040d117f4f60b1641aaeb75b669b8c _output/1-27/bin/linux/amd64/kube-scheduler +970bd1228050607bfc9b731c8489b00ff20034f5de4f0d0617a83a72d660e314 _output/1-27/bin/linux/amd64/kubeadm +3c9512e5d932276848067c7a3221328f375f19ad71b3e73052cf59c33f72c24f _output/1-27/bin/linux/amd64/kubectl +84cd5a7a5841fdbcd502d75dcaedfdf35a0c0428756689815c26d5189ce0ee28 _output/1-27/bin/linux/amd64/kubelet +5d13ae116a7e4a79d16cee6d83bf725561811dcac69e5c89e2ffe9075b743295 _output/1-27/bin/linux/arm64/kube-apiserver +1b04454177f11dc5b9a4737e3a9ec9cdcb326040f5d2e8378cc04e837f7f3bae _output/1-27/bin/linux/arm64/kube-controller-manager +9cdd8e825fa7a95d87edcb8805bbcc373e39d5728ec3e5154d05d0dc1357ba9a _output/1-27/bin/linux/arm64/kube-proxy +9d466f522c5ac849e6a5719190ae16cdecea2c799ac291c13d9e6fc8cfb3ff63 _output/1-27/bin/linux/arm64/kube-scheduler +fb474a0de73f987f64c0ba6ec3ae84b9c286ce1c84e271234ae2f6fbc6c653e3 _output/1-27/bin/linux/arm64/kubeadm +4211cd93f526f2b7b3f2dcb64f82d43de00bdd1343cc5a3d38fb821e6d795bb5 _output/1-27/bin/linux/arm64/kubectl +37d129af512f443918baf90d482b22040e0ad4471b49e12678565b38679f2a7c _output/1-27/bin/linux/arm64/kubelet +db4b859eb686c433919fc78c7eb17edabe0bb868e1d3e3f1d08950ad8cfd2e7d _output/1-27/bin/windows/amd64/kube-proxy.exe 
+bc4f241a9316db4ef9b6c55f8c3842d3ddab04a36a7ff49eb661269c7d391ee9 _output/1-27/bin/windows/amd64/kubeadm.exe +07720d211dbd5f9fcf3f2707b3e2eac1dd017eeac88b9a21ce55901c0db8add8 _output/1-27/bin/windows/amd64/kubectl.exe +e01d986ca8536fe783f414743a394da9404c8171ec2259af70959d93ac7754c0 _output/1-27/bin/windows/amd64/kubelet.exe diff --git a/projects/kubernetes/kubernetes/1-27/patches/0016-EKS-PATCH-fix-CVE-2023-47108.patch b/projects/kubernetes/kubernetes/1-27/patches/0016-EKS-PATCH-fix-CVE-2023-47108.patch new file mode 100644 index 0000000000..e8317acf13 --- /dev/null +++ b/projects/kubernetes/kubernetes/1-27/patches/0016-EKS-PATCH-fix-CVE-2023-47108.patch @@ -0,0 +1,129046 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: vela +Date: Mon, 2 Dec 2024 02:07:05 -0800 +Subject: [PATCH] --EKS-PATCH-- fix CVE-2023-47108 + +--- + .../google/s2a-go}/LICENSE | 5 +- + go.mod | 48 +- + go.sum | 350 +- + pkg/kubelet/cri/remote/remote_image.go | 1 + + pkg/kubelet/cri/remote/remote_runtime.go | 1 + + staging/src/k8s.io/api/go.mod | 7 +- + staging/src/k8s.io/api/go.sum | 28 +- + .../src/k8s.io/apiextensions-apiserver/go.mod | 41 +- + .../src/k8s.io/apiextensions-apiserver/go.sum | 356 +- + staging/src/k8s.io/apimachinery/go.mod | 11 +- + staging/src/k8s.io/apimachinery/go.sum | 28 +- + staging/src/k8s.io/apiserver/go.mod | 41 +- + staging/src/k8s.io/apiserver/go.sum | 356 +- + .../apiserver/pkg/endpoints/filters/traces.go | 12 +- + .../storage/storagebackend/factory/etcd3.go | 1 + + staging/src/k8s.io/cli-runtime/go.mod | 10 +- + staging/src/k8s.io/cli-runtime/go.sum | 30 +- + staging/src/k8s.io/client-go/go.mod | 10 +- + staging/src/k8s.io/client-go/go.sum | 30 +- + staging/src/k8s.io/cloud-provider/go.mod | 41 +- + staging/src/k8s.io/cloud-provider/go.sum | 362 +- + staging/src/k8s.io/cluster-bootstrap/go.mod | 4 +- + staging/src/k8s.io/cluster-bootstrap/go.sum | 26 +- + .../src/k8s.io/code-generator/examples/go.mod | 8 +- + .../src/k8s.io/code-generator/examples/go.sum | 22 +- + staging/src/k8s.io/code-generator/go.mod | 8 +- + staging/src/k8s.io/code-generator/go.sum | 24 +- + staging/src/k8s.io/component-base/go.mod | 39 +- + staging/src/k8s.io/component-base/go.sum | 244 +- + staging/src/k8s.io/component-helpers/go.mod | 8 +- + staging/src/k8s.io/component-helpers/go.sum | 30 +- + staging/src/k8s.io/controller-manager/go.mod | 41 +- + staging/src/k8s.io/controller-manager/go.sum | 362 +- + staging/src/k8s.io/cri-api/go.mod | 10 +- + staging/src/k8s.io/cri-api/go.sum | 51 +- + staging/src/k8s.io/csi-translation-lib/go.mod | 6 +- + staging/src/k8s.io/csi-translation-lib/go.sum | 28 +- + .../k8s.io/dynamic-resource-allocation/go.mod | 14 +- + .../k8s.io/dynamic-resource-allocation/go.sum | 47 +- + staging/src/k8s.io/kms/go.mod | 6 +- + staging/src/k8s.io/kms/go.sum | 37 +- + .../k8s.io/kms/internal/plugins/mock/go.mod | 6 +- + .../k8s.io/kms/internal/plugins/mock/go.sum | 14 +- + staging/src/k8s.io/kube-aggregator/go.mod | 41 +- + staging/src/k8s.io/kube-aggregator/go.sum | 362 +- + .../src/k8s.io/kube-controller-manager/go.mod | 3 +- + .../src/k8s.io/kube-controller-manager/go.sum | 58 +- + staging/src/k8s.io/kube-proxy/go.mod | 5 +- + staging/src/k8s.io/kube-proxy/go.sum | 55 +- + staging/src/k8s.io/kube-scheduler/go.mod | 4 +- + staging/src/k8s.io/kube-scheduler/go.sum | 53 +- + staging/src/k8s.io/kubectl/go.mod | 10 +- + staging/src/k8s.io/kubectl/go.sum | 59 +- + staging/src/k8s.io/kubelet/go.mod | 8 +- + staging/src/k8s.io/kubelet/go.sum | 68 +- + 
.../azure/azure_blobDiskController_test.go | 2 +- + .../src/k8s.io/legacy-cloud-providers/go.mod | 21 +- + .../src/k8s.io/legacy-cloud-providers/go.sum | 109 +- + staging/src/k8s.io/metrics/go.mod | 10 +- + staging/src/k8s.io/metrics/go.sum | 30 +- + staging/src/k8s.io/mount-utils/go.mod | 7 +- + staging/src/k8s.io/mount-utils/go.sum | 27 +- + .../src/k8s.io/pod-security-admission/go.mod | 41 +- + .../src/k8s.io/pod-security-admission/go.sum | 362 +- + staging/src/k8s.io/sample-apiserver/go.mod | 39 +- + staging/src/k8s.io/sample-apiserver/go.sum | 362 +- + staging/src/k8s.io/sample-cli-plugin/go.mod | 8 +- + staging/src/k8s.io/sample-cli-plugin/go.sum | 30 +- + staging/src/k8s.io/sample-controller/go.mod | 8 +- + staging/src/k8s.io/sample-controller/go.sum | 30 +- + .../go/compute/internal/version.go | 2 +- + .../cenkalti/backoff/v4/.travis.yml | 10 - + .../github.com/cenkalti/backoff/v4/retry.go | 50 +- + vendor/github.com/go-logr/logr/.golangci.yaml | 3 - + vendor/github.com/go-logr/logr/README.md | 113 +- + vendor/github.com/go-logr/logr/SECURITY.md | 18 + + vendor/github.com/go-logr/logr/discard.go | 32 +- + vendor/github.com/go-logr/logr/funcr/funcr.go | 75 +- + vendor/github.com/go-logr/logr/logr.go | 201 +- + .../google/go-cmp/cmp/cmpopts/equate.go | 49 +- + .../google/go-cmp/cmp/cmpopts/ignore.go | 16 +- + .../google/go-cmp/cmp/cmpopts/sort.go | 12 +- + .../google/go-cmp/cmp/cmpopts/xform.go | 4 +- + .../github.com/google/go-cmp/cmp/compare.go | 38 +- + .../cmp/{export_unsafe.go => export.go} | 5 - + .../google/go-cmp/cmp/export_panic.go | 16 - + .../value/{pointer_unsafe.go => pointer.go} | 3 - + .../cmp/internal/value/pointer_purego.go | 34 - + .../github.com/google/go-cmp/cmp/options.go | 84 +- + vendor/github.com/google/go-cmp/cmp/path.go | 46 +- + .../google/go-cmp/cmp/report_reflect.go | 2 +- + vendor/github.com/google/s2a-go/.gitignore | 6 + + .../google/s2a-go/CODE_OF_CONDUCT.md | 93 + + .../github.com/google/s2a-go/CONTRIBUTING.md | 29 + + .../google/s2a-go/LICENSE.md} | 1 + + vendor/github.com/google/s2a-go/README.md | 17 + + .../google/s2a-go/fallback/s2a_fallback.go | 167 + + .../s2a-go/internal/authinfo/authinfo.go | 119 + + .../s2a-go/internal/handshaker/handshaker.go | 438 + + .../internal/handshaker/service/service.go | 99 + + .../proto/common_go_proto/common.pb.go | 389 + + .../s2a_context_go_proto/s2a_context.pb.go | 267 + + .../internal/proto/s2a_go_proto/s2a.pb.go | 1377 + + .../proto/s2a_go_proto/s2a_grpc.pb.go | 173 + + .../proto/v2/common_go_proto/common.pb.go | 367 + + .../v2/s2a_context_go_proto/s2a_context.pb.go | 248 + + .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 2494 ++ + .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 159 + + .../internal/aeadcrypter/aeadcrypter.go | 34 + + .../record/internal/aeadcrypter/aesgcm.go | 70 + + .../record/internal/aeadcrypter/chachapoly.go | 67 + + .../record/internal/aeadcrypter/common.go | 92 + + .../record/internal/halfconn/ciphersuite.go | 98 + + .../record/internal/halfconn/counter.go | 60 + + .../record/internal/halfconn/expander.go | 59 + + .../record/internal/halfconn/halfconn.go | 193 + + .../google/s2a-go/internal/record/record.go | 757 + + .../s2a-go/internal/record/ticketsender.go | 176 + + .../internal/tokenmanager/tokenmanager.go | 70 + + .../google/s2a-go/internal/v2/README.md | 1 + + .../internal/v2/certverifier/certverifier.go | 122 + + .../testdata/client_intermediate_cert.der | Bin 0 -> 998 bytes + .../testdata/client_leaf_cert.der | Bin 0 -> 1147 bytes + .../testdata/client_root_cert.der | Bin 0 -> 1013 
bytes + .../testdata/server_intermediate_cert.der | Bin 0 -> 998 bytes + .../testdata/server_leaf_cert.der | Bin 0 -> 1147 bytes + .../testdata/server_root_cert.der | Bin 0 -> 1013 bytes + .../internal/v2/remotesigner/remotesigner.go | 186 + + .../v2/remotesigner/testdata/client_cert.der | Bin 0 -> 1013 bytes + .../v2/remotesigner/testdata/client_cert.pem | 24 + + .../v2/remotesigner/testdata/client_key.pem | 27 + + .../v2/remotesigner/testdata/server_cert.der | Bin 0 -> 1013 bytes + .../v2/remotesigner/testdata/server_cert.pem | 24 + + .../v2/remotesigner/testdata/server_key.pem | 27 + + .../google/s2a-go/internal/v2/s2av2.go | 354 + + .../internal/v2/testdata/client_cert.pem | 24 + + .../internal/v2/testdata/client_key.pem | 27 + + .../internal/v2/testdata/server_cert.pem | 24 + + .../internal/v2/testdata/server_key.pem | 27 + + .../tlsconfigstore/testdata/client_cert.pem | 24 + + .../v2/tlsconfigstore/testdata/client_key.pem | 27 + + .../tlsconfigstore/testdata/server_cert.pem | 24 + + .../v2/tlsconfigstore/testdata/server_key.pem | 27 + + .../v2/tlsconfigstore/tlsconfigstore.go | 404 + + vendor/github.com/google/s2a-go/s2a.go | 412 + + .../github.com/google/s2a-go/s2a_options.go | 208 + + vendor/github.com/google/s2a-go/s2a_utils.go | 79 + + .../google/s2a-go/stream/s2a_stream.go | 34 + + .../google/s2a-go/testdata/client_cert.pem | 24 + + .../google/s2a-go/testdata/client_key.pem | 27 + + .../google/s2a-go/testdata/server_cert.pem | 24 + + .../google/s2a-go/testdata/server_key.pem | 27 + + vendor/github.com/google/uuid/.travis.yml | 9 - + vendor/github.com/google/uuid/CHANGELOG.md | 10 + + vendor/github.com/google/uuid/CONTRIBUTING.md | 16 + + vendor/github.com/google/uuid/README.md | 10 +- + vendor/github.com/google/uuid/node_js.go | 2 +- + vendor/github.com/google/uuid/uuid.go | 10 +- + .../gax-go/v2/.release-please-manifest.json | 2 +- + .../googleapis/gax-go/v2/CHANGES.md | 45 + + .../googleapis/gax-go/v2/apierror/apierror.go | 14 + + .../googleapis/gax-go/v2/call_option.go | 21 + + .../github.com/googleapis/gax-go/v2/header.go | 68 +- + .../googleapis/gax-go/v2/internal/version.go | 2 +- + .../github.com/googleapis/gax-go/v2/invoke.go | 10 + + .../grpc-gateway/v2/internal/httprule/fuzz.go | 4 +- + .../v2/internal/httprule/parse.go | 30 +- + .../grpc-gateway/v2/runtime/BUILD.bazel | 10 +- + .../grpc-gateway/v2/runtime/context.go | 72 +- + .../grpc-gateway/v2/runtime/convert.go | 46 +- + .../grpc-gateway/v2/runtime/errors.go | 17 +- + .../grpc-gateway/v2/runtime/fieldmask.go | 9 +- + .../grpc-gateway/v2/runtime/handler.go | 26 +- + .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 38 +- + .../grpc-gateway/v2/runtime/marshal_proto.go | 9 +- + .../grpc-gateway/v2/runtime/mux.go | 162 +- + .../grpc-gateway/v2/runtime/pattern.go | 2 - + .../grpc-gateway/v2/runtime/query.go | 69 +- + .../grpc-gateway/v2/utilities/BUILD.bazel | 6 +- + .../v2/utilities/readerfactory.go | 3 +- + .../v2/utilities/string_array_flag.go | 33 + + .../grpc-gateway/v2/utilities/trie.go | 2 +- + .../testify/assert/assertion_compare.go | 36 +- + .../testify/assert/assertion_format.go | 216 +- + .../testify/assert/assertion_forward.go | 432 +- + .../testify/assert/assertion_order.go | 24 +- + .../stretchr/testify/assert/assertions.go | 384 +- + .../github.com/stretchr/testify/assert/doc.go | 43 +- + .../testify/assert/http_assertions.go | 12 +- + .../stretchr/testify/require/doc.go | 23 +- + .../stretchr/testify/require/require.go | 444 +- + .../testify/require/require_forward.go | 432 +- + 
.../google.golang.org/grpc/otelgrpc/config.go | 229 + + .../google.golang.org/grpc/otelgrpc/doc.go | 22 + + .../grpc/otelgrpc/grpctrace.go | 163 - + .../grpc/otelgrpc/interceptor.go | 311 +- + .../grpc/otelgrpc/internal/parse.go | 26 +- + .../grpc/otelgrpc/metadata_supplier.go | 98 + + .../grpc/otelgrpc/semconv.go | 4 +- + .../grpc/otelgrpc/stats_handler.go | 235 + + .../grpc/otelgrpc/version.go | 6 +- + .../net/http/otelhttp/common.go | 2 +- + .../net/http/otelhttp/config.go | 14 +- + .../net/http/otelhttp/handler.go | 116 +- + .../http/otelhttp/internal/semconvutil/gen.go | 21 + + .../otelhttp/internal/semconvutil/httpconv.go | 552 + + .../otelhttp/internal/semconvutil/netconv.go | 368 + + .../net/http/otelhttp/transport.go | 10 +- + .../net/http/otelhttp/version.go | 6 +- + .../instrumentation/net/http/otelhttp/wrap.go | 14 +- + .../go.opentelemetry.io/otel/.codespellignore | 5 + + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + + vendor/go.opentelemetry.io/otel/.gitignore | 7 +- + vendor/go.opentelemetry.io/otel/.golangci.yml | 104 +- + vendor/go.opentelemetry.io/otel/.lycheeignore | 3 + + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 852 +- + vendor/go.opentelemetry.io/otel/CODEOWNERS | 4 +- + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 198 +- + vendor/go.opentelemetry.io/otel/Makefile | 142 +- + vendor/go.opentelemetry.io/otel/README.md | 54 +- + vendor/go.opentelemetry.io/otel/RELEASING.md | 35 +- + .../otel/attribute/filter.go | 60 + + .../go.opentelemetry.io/otel/attribute/set.go | 27 +- + .../otel/attribute/value.go | 95 +- + .../otel/baggage/baggage.go | 88 +- + .../go.opentelemetry.io/otel/codes/codes.go | 10 + + vendor/go.opentelemetry.io/otel/codes/doc.go | 2 +- + .../otel/exporters/otlp/otlptrace/README.md | 8 +- + .../otel/exporters/otlp/otlptrace/exporter.go | 7 +- + .../otlp/otlptrace/otlptracegrpc/client.go | 23 +- + .../internal/envconfig/envconfig.go | 94 +- + .../otlptrace/otlptracegrpc/internal/gen.go | 35 + + .../internal/otlpconfig/envconfig.go | 34 +- + .../internal/otlpconfig/options.go | 29 +- + .../internal/otlpconfig/optiontypes.go | 5 +- + .../internal/otlpconfig/tls.go | 5 +- + .../otlptracegrpc}/internal/partialsuccess.go | 39 +- + .../otlptracegrpc}/internal/retry/retry.go | 40 +- + .../otlp/otlptrace/otlptracegrpc/options.go | 4 +- + .../otlp/otlptrace/version.go} | 12 +- + vendor/go.opentelemetry.io/otel/handler.go | 65 +- + .../otel/internal/attribute/attribute.go | 111 + + .../go.opentelemetry.io/otel/internal/gen.go | 29 + + .../otel/internal/global/handler.go | 102 + + .../otel/internal/global/instruments.go | 371 + + .../otel/internal/global/internal_logging.go | 44 +- + .../otel/internal/global/meter.go | 354 + + .../otel/internal/global/state.go | 45 +- + .../otel/internal/global/trace.go | 7 + + vendor/go.opentelemetry.io/otel/metric.go | 53 + + .../otel/metric/asyncfloat64.go | 271 + + .../otel/metric/asyncint64.go | 269 + + .../go.opentelemetry.io/otel/metric/config.go | 25 +- + vendor/go.opentelemetry.io/otel/metric/doc.go | 157 +- + .../otel/metric/embedded/embedded.go | 234 + + .../otel/metric/global/global.go | 42 - + .../otel/metric/instrument.go | 357 + + .../instrument/asyncfloat64/asyncfloat64.go | 70 - + .../instrument/asyncint64/asyncint64.go | 70 - + .../otel/metric/instrument/config.go | 69 - + .../instrument/syncfloat64/syncfloat64.go | 56 - + .../metric/instrument/syncint64/syncint64.go | 56 - + .../metric/internal/global/instruments.go | 360 - + .../otel/metric/internal/global/meter.go | 347 - + 
.../otel/metric/internal/global/state.go | 68 - + .../go.opentelemetry.io/otel/metric/meter.go | 204 +- + .../go.opentelemetry.io/otel/metric/noop.go | 181 - + .../otel/metric/syncfloat64.go | 185 + + .../otel/metric/syncint64.go | 185 + + .../otel/propagation/trace_context.go | 6 +- + .../go.opentelemetry.io/otel/requirements.txt | 1 + + .../otel/sdk/internal/env/env.go | 10 +- + .../otel/sdk/internal/gen.go | 29 + + .../otel/sdk/internal/internal.go | 11 +- + .../otel/sdk/resource/auto.go | 68 +- + .../otel/sdk/resource/builtin.go | 10 +- + .../otel/sdk/resource/config.go | 7 + + .../otel/sdk/resource/container.go | 4 +- + .../otel/sdk/resource/doc.go | 3 + + .../otel/sdk/resource/env.go | 31 +- + .../otel/sdk/resource/host_id.go | 120 + + .../resource/host_id_bsd.go} | 19 +- + .../otel/sdk/resource/host_id_darwin.go | 19 + + .../resource/host_id_exec.go} | 27 +- + .../otel/sdk/resource/host_id_linux.go | 22 + + .../otel/sdk/resource/host_id_readfile.go | 28 + + .../otel/sdk/resource/host_id_unsupported.go | 36 + + .../otel/sdk/resource/host_id_windows.go | 48 + + .../otel/sdk/resource/os.go | 13 +- + .../otel/sdk/resource/os_release_unix.go | 8 +- + .../otel/sdk/resource/process.go | 54 +- + .../otel/sdk/resource/resource.go | 27 +- + .../otel/sdk/trace/batch_span_processor.go | 70 +- + .../otel/sdk/trace/provider.go | 129 +- + .../otel/sdk/trace/sampling.go | 14 +- + .../otel/sdk/trace/simple_span_processor.go | 9 +- + .../otel/sdk/trace/span.go | 66 +- + .../otel/sdk/trace/span_exporter.go | 2 +- + .../otel/sdk/trace/span_processor.go | 7 +- + .../otel/sdk/trace/tracer.go | 5 +- + .../otel/sdk/trace/tracetest/span.go | 1 + + .../otel/sdk/trace/version.go | 20 + + .../go.opentelemetry.io/otel/sdk/version.go | 20 + + .../otel/semconv/internal/http.go | 8 +- + .../otel/semconv/v1.17.0/doc.go | 20 + + .../otel/semconv/v1.17.0/event.go | 199 + + .../unit.go => semconv/v1.17.0/exception.go} | 11 +- + .../otel/semconv/v1.17.0/http.go | 21 + + .../otel/semconv/v1.17.0/resource.go | 2010 ++ + .../otel/semconv/v1.17.0/schema.go | 20 + + .../otel/semconv/v1.17.0/trace.go | 3375 +++ + .../otel/semconv/v1.21.0/attribute_group.go | 1877 ++ + .../otel/semconv/v1.21.0/doc.go | 20 + + .../otel/semconv/v1.21.0/event.go | 199 + + .../otel/semconv/v1.21.0/exception.go | 20 + + .../otel/semconv/v1.21.0/resource.go | 2310 ++ + .../otel/semconv/v1.21.0/schema.go | 20 + + .../otel/semconv/v1.21.0/trace.go | 2495 ++ + .../go.opentelemetry.io/otel/trace/config.go | 18 + + vendor/go.opentelemetry.io/otel/trace/doc.go | 66 +- + .../otel/trace/embedded/embedded.go | 56 + + vendor/go.opentelemetry.io/otel/trace/noop.go | 14 +- + .../otel/trace/noop/noop.go | 118 + + .../go.opentelemetry.io/otel/trace/trace.go | 45 +- + .../otel/trace/tracestate.go | 38 +- + vendor/go.opentelemetry.io/otel/version.go | 2 +- + vendor/go.opentelemetry.io/otel/versions.yaml | 29 +- + .../collector/trace/v1/trace_service.pb.go | 2 +- + .../collector/trace/v1/trace_service.pb.gw.go | 26 +- + .../trace/v1/trace_service_grpc.pb.go | 2 +- + .../proto/otlp/common/v1/common.pb.go | 9 +- + .../proto/otlp/resource/v1/resource.pb.go | 2 +- + .../proto/otlp/trace/v1/trace.pb.go | 26 +- + .../chacha20poly1305/chacha20poly1305.go | 98 + + .../chacha20poly1305_amd64.go | 86 + + .../chacha20poly1305/chacha20poly1305_amd64.s | 2715 ++ + .../chacha20poly1305_generic.go | 81 + + .../chacha20poly1305_noasm.go | 15 + + .../chacha20poly1305/xchacha20poly1305.go | 86 + + vendor/golang.org/x/crypto/hkdf/hkdf.go | 95 + + .../x/oauth2/google/appengine_gen1.go 
| 1 - + .../x/oauth2/google/appengine_gen2_flex.go | 1 - + vendor/golang.org/x/oauth2/google/default.go | 9 +- + .../x/oauth2/internal/client_appengine.go | 1 - + vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- + vendor/golang.org/x/oauth2/internal/token.go | 60 +- + vendor/golang.org/x/oauth2/token.go | 19 +- + .../api/compute/v0.alpha/compute-api.json | 3303 ++- + .../api/compute/v0.alpha/compute-gen.go | 7612 +++++- + .../api/compute/v0.beta/compute-api.json | 5218 +++- + .../api/compute/v0.beta/compute-gen.go | 21084 ++++++++++++---- + .../api/compute/v1/compute-api.json | 1099 +- + .../api/compute/v1/compute-gen.go | 2832 ++- + .../api/container/v1/container-api.json | 317 +- + .../api/container/v1/container-gen.go | 732 +- + .../api/googleapi/googleapi.go | 5 +- + vendor/google.golang.org/api/internal/cba.go | 282 + + .../api/internal/cert/secureconnect_cert.go | 3 +- + .../google.golang.org/api/internal/creds.go | 5 +- + vendor/google.golang.org/api/internal/dca.go | 144 - + .../api/internal/gensupport/media.go | 7 +- + .../api/internal/gensupport/resumable.go | 9 +- + .../api/internal/gensupport/send.go | 5 + + .../api/internal/impersonate/impersonate.go | 3 +- + vendor/google.golang.org/api/internal/s2a.go | 136 + + .../api/internal/settings.go | 1 + + .../google.golang.org/api/internal/version.go | 2 +- + .../api/monitoring/v3/monitoring-api.json | 54 +- + .../api/monitoring/v3/monitoring-gen.go | 108 +- + .../option/internaloption/internaloption.go | 15 + + .../google.golang.org/api/tpu/v1/tpu-api.json | 6 +- + .../google.golang.org/api/tpu/v1/tpu-gen.go | 14 +- + .../api/transport/http/dial.go | 11 +- + .../internal/socket/socket_service.pb.go | 2822 +++ + .../internal/socket/socket_service.proto | 460 + + .../google.golang.org/appengine/socket/doc.go | 10 + + .../appengine/socket/socket_classic.go | 290 + + .../appengine/socket/socket_vm.go | 64 + + .../api/annotations/field_behavior.pb.go | 22 +- + .../genproto/googleapis/api/tidyfix.go | 23 + + .../genproto/internal/doc.go | 17 + + vendor/google.golang.org/grpc/README.md | 60 +- + .../grpc/attributes/attributes.go | 59 +- + .../grpc/balancer/balancer.go | 62 +- + .../grpc/balancer/base/balancer.go | 22 +- + .../grpc/balancer_conn_wrappers.go | 75 +- + .../grpc_binarylog_v1/binarylog.pb.go | 2 +- + vendor/google.golang.org/grpc/call.go | 11 +- + vendor/google.golang.org/grpc/clientconn.go | 248 +- + vendor/google.golang.org/grpc/codec.go | 8 +- + vendor/google.golang.org/grpc/dialoptions.go | 42 +- + .../grpc/encoding/encoding.go | 17 +- + .../grpc/encoding/gzip/gzip.go | 4 +- + .../grpc/encoding/proto/proto.go | 4 +- + .../grpc/grpclog/component.go | 40 +- + .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- + .../google.golang.org/grpc/grpclog/logger.go | 30 +- + .../grpc/grpclog/loggerv2.go | 56 +- + .../google.golang.org/grpc/health/client.go | 2 +- + .../grpc/health/grpc_health_v1/health.pb.go | 2 +- + .../health/grpc_health_v1/health_grpc.pb.go | 22 +- + vendor/google.golang.org/grpc/interceptor.go | 12 +- + .../grpc/internal/backoff/backoff.go | 36 + + .../balancer/gracefulswitch/gracefulswitch.go | 59 +- + .../grpc/internal/balancerload/load.go | 4 +- + .../grpc/internal/binarylog/method_logger.go | 4 +- + .../grpc/internal/buffer/unbounded.go | 18 +- + .../grpc/internal/channelz/funcs.go | 69 +- + .../grpc/internal/channelz/logging.go | 12 +- + .../grpc/internal/channelz/types.go | 5 + + .../grpc/internal/channelz/util_linux.go | 2 +- + .../grpc/internal/channelz/util_nonlinux.go | 2 +- + 
.../grpc/internal/credentials/credentials.go | 8 +- + .../grpc/internal/envconfig/envconfig.go | 12 +- + .../grpc/internal/grpclog/grpclog.go | 40 +- + .../grpc/internal/grpclog/prefixLogger.go | 8 +- + .../grpc/internal/grpcrand/grpcrand.go | 7 + + .../internal/grpcsync/callback_serializer.go | 54 +- + .../grpc/internal/grpcsync/pubsub.go | 121 + + .../grpc/{ => internal/idle}/idle.go | 188 +- + .../grpc/internal/internal.go | 51 +- + .../grpc/internal/metadata/metadata.go | 2 +- + .../grpc/internal/pretty/pretty.go | 2 +- + .../grpc/internal/resolver/config_selector.go | 4 +- + .../internal/resolver/dns/dns_resolver.go | 74 +- + .../grpc/internal/status/status.go | 36 +- + .../grpc/internal/transport/controlbuf.go | 16 +- + .../grpc/internal/transport/handler_server.go | 13 +- + .../grpc/internal/transport/http2_client.go | 56 +- + .../grpc/internal/transport/http2_server.go | 22 +- + .../grpc/internal/transport/http_util.go | 77 +- + .../grpc/internal/transport/transport.go | 19 +- + .../google.golang.org/grpc/picker_wrapper.go | 34 +- + vendor/google.golang.org/grpc/pickfirst.go | 88 +- + vendor/google.golang.org/grpc/preloader.go | 2 +- + .../grpc/resolver/manual/manual.go | 44 +- + vendor/google.golang.org/grpc/resolver/map.go | 10 +- + .../grpc/resolver/resolver.go | 84 +- + .../grpc/resolver_conn_wrapper.go | 10 +- + vendor/google.golang.org/grpc/rpc_util.go | 44 +- + vendor/google.golang.org/grpc/server.go | 231 +- + .../grpc/shared_buffer_pool.go | 154 + + vendor/google.golang.org/grpc/stats/stats.go | 14 +- + .../google.golang.org/grpc/status/status.go | 14 +- + vendor/google.golang.org/grpc/stream.go | 130 +- + vendor/google.golang.org/grpc/tap/tap.go | 6 + + vendor/google.golang.org/grpc/trace.go | 6 +- + vendor/google.golang.org/grpc/version.go | 2 +- + vendor/google.golang.org/grpc/vet.sh | 10 +- + vendor/modules.txt | 130 +- + 450 files changed, 77542 insertions(+), 16460 deletions(-) + rename LICENSES/vendor/{go.opentelemetry.io/otel/exporters/otlp/internal/retry => github.com/google/s2a-go}/LICENSE (98%) + delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.travis.yml + create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md + rename vendor/github.com/google/go-cmp/cmp/{export_unsafe.go => export.go} (94%) + delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go + rename vendor/github.com/google/go-cmp/cmp/internal/value/{pointer_unsafe.go => pointer.go} (95%) + delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go + create mode 100644 vendor/github.com/google/s2a-go/.gitignore + create mode 100644 vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md + create mode 100644 vendor/github.com/google/s2a-go/CONTRIBUTING.md + rename vendor/{go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE => github.com/google/s2a-go/LICENSE.md} (99%) + create mode 100644 vendor/github.com/google/s2a-go/README.md + create mode 100644 vendor/github.com/google/s2a-go/fallback/s2a_fallback.go + create mode 100644 vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go + create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go + create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/service/service.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go + create mode 100644 
vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/record.go + create mode 100644 vendor/github.com/google/s2a-go/internal/record/ticketsender.go + create mode 100644 vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/README.md + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/s2av2.go + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem + create mode 100644 
vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem + create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go + create mode 100644 vendor/github.com/google/s2a-go/s2a.go + create mode 100644 vendor/github.com/google/s2a-go/s2a_options.go + create mode 100644 vendor/github.com/google/s2a-go/s2a_utils.go + create mode 100644 vendor/github.com/google/s2a-go/stream/s2a_stream.go + create mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem + create mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem + delete mode 100644 vendor/github.com/google/uuid/.travis.yml + create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md + create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go + delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go + create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go + create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore + create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc + create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/envconfig/envconfig.go (57%) + create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/envconfig.go (74%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/options.go (89%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/optiontypes.go (90%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/{ => otlptracegrpc}/internal/otlpconfig/tls.go (87%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/partialsuccess.go (64%) + rename vendor/go.opentelemetry.io/otel/exporters/otlp/{ => otlptrace/otlptracegrpc}/internal/retry/retry.go (80%) + rename 
vendor/go.opentelemetry.io/otel/{metric/unit/doc.go => exporters/otlp/otlptrace/version.go} (65%) + create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go + create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/global/global.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/config.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/state.go + delete mode 100644 vendor/go.opentelemetry.io/otel/metric/noop.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go + create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go + create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/gen.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go + rename vendor/go.opentelemetry.io/otel/{metric/instrument/instrument.go => sdk/resource/host_id_bsd.go} (54%) + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go + rename vendor/go.opentelemetry.io/otel/{exporters/otlp/internal/config.go => sdk/resource/host_id_exec.go} (50%) + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go + create mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go + rename vendor/go.opentelemetry.io/otel/{metric/unit/unit.go => semconv/v1.17.0/exception.go} (70%) + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go + create mode 100644 
vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go + create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go + create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go + create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go + create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go + create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go + create mode 100644 vendor/google.golang.org/api/internal/cba.go + delete mode 100644 vendor/google.golang.org/api/internal/dca.go + create mode 100644 vendor/google.golang.org/api/internal/s2a.go + create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go + create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto + create mode 100644 vendor/google.golang.org/appengine/socket/doc.go + create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go + create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go + create mode 100644 vendor/google.golang.org/genproto/googleapis/api/tidyfix.go + create mode 100644 vendor/google.golang.org/genproto/internal/doc.go + create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go + rename vendor/google.golang.org/grpc/{ => internal/idle}/idle.go (61%) + create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go + +diff --git a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/LICENSES/vendor/github.com/google/s2a-go/LICENSE +similarity index 98% +rename from LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE +rename to LICENSES/vendor/github.com/google/s2a-go/LICENSE +index bdbfa6963be..5f39be4994d 100644 +--- a/LICENSES/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE ++++ b/LICENSES/vendor/github.com/google/s2a-go/LICENSE +@@ -1,4 +1,5 @@ +-= vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry licensed under: = ++= vendor/github.com/google/s2a-go licensed under: = ++ + + Apache License + Version 2.0, January 2004 +@@ -202,4 +203,4 @@ + See the License for the specific language governing permissions and + limitations under the License. 
+ +-= vendor/go.opentelemetry.io/otel/LICENSE 86d3f3a95c324c9479bd8986968f4327 ++= vendor/github.com/google/s2a-go/LICENSE.md 3b83ef96387f14655fc854ddc3c6bd57 +diff --git a/go.mod b/go.mod +index eca5b8f2bb0..b91a6b3210b 100644 +--- a/go.mod ++++ b/go.mod +@@ -32,7 +32,7 @@ require ( + github.com/emicklei/go-restful/v3 v3.9.0 + github.com/evanphx/json-patch v4.12.0+incompatible + github.com/fsnotify/fsnotify v1.6.0 +- github.com/go-logr/logr v1.2.3 ++ github.com/go-logr/logr v1.3.0 + github.com/godbus/dbus/v5 v5.0.6 + github.com/gogo/protobuf v1.3.2 + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +@@ -41,9 +41,9 @@ require ( + github.com/google/cadvisor v0.47.2 + github.com/google/cel-go v0.12.7 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5 + github.com/libopenstorage/openstorage v1.0.0 + github.com/lithammer/dedent v1.1.0 +@@ -62,32 +62,32 @@ require ( + github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + github.com/vishvananda/netlink v1.1.0 + github.com/vmware/govmomi v0.30.6 + go.etcd.io/etcd/api/v3 v3.5.7 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 + go.etcd.io/etcd/client/v3 v3.5.7 + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 +- go.opentelemetry.io/proto/otlp v0.19.0 ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 ++ go.opentelemetry.io/proto/otlp v1.0.0 + go.uber.org/goleak v1.2.1 + go.uber.org/zap v1.19.0 + golang.org/x/crypto v0.22.0 + golang.org/x/net v0.24.0 +- golang.org/x/oauth2 v0.7.0 ++ golang.org/x/oauth2 v0.11.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.19.0 + golang.org/x/term v0.19.0 + golang.org/x/time v0.3.0 + golang.org/x/tools v0.16.1 +- google.golang.org/api v0.114.0 +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/api v0.126.0 ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/gcfg.v1 v1.2.3 + gopkg.in/square/go-jose.v2 v2.6.0 +@@ -130,7 +130,7 @@ require ( + ) + + require ( +- cloud.google.com/go/compute v1.19.1 // indirect ++ cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect +@@ -145,7 +145,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ 
github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect +@@ -173,15 +173,16 @@ require ( + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect ++ github.com/google/s2a-go v0.1.4 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect +- github.com/googleapis/gax-go/v2 v2.7.1 // indirect ++ github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gorilla/websocket v1.4.2 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect +@@ -224,18 +225,17 @@ require ( + go.etcd.io/etcd/raft/v3 v3.5.7 // indirect + go.etcd.io/etcd/server/v3 v3.5.7 // indirect + go.opencensus.io v0.24.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/warnings.v0 v0.1.1 // indirect +diff --git a/go.sum b/go.sum +index b935b43501b..7e1a75d2b20 100644 +--- a/go.sum ++++ b/go.sum +@@ -28,146 +28,143 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD + cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= + cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= + cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod 
h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod 
h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= + cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= 
+-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod 
h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= 
+-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= 
+-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/azure-sdk-for-go v55.0.0+incompatible h1:L4/vUGbg1Xkw5L20LZD+hJI5I+ibWSytqQ68lTCfLwY= + github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +@@ -242,8 +239,8 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= + github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= 
+-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= +@@ -360,10 +357,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= + github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +@@ -403,8 +400,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -434,9 +431,8 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
+-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -501,8 +497,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -526,20 +522,22 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= ++github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= ++github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= + github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= + github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/googleapis/gax-go/v2 v2.1.0/go.mod 
h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= + github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +-github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= ++github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= + github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -554,8 +552,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf + github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= + github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= + github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +@@ -614,8 +612,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -750,8 +748,8 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG + github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= + github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +@@ -806,8 +804,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +@@ -864,29 +863,27 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= + go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58= + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= + go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= + go.opentelemetry.io/contrib/propagators/b3 v1.10.0/go.mod h1:oxvamQ/mTDFQVugml/uFS59+aEUnFLhmd1wsG+n5MOE= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= 
+-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +@@ -910,6 +907,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
++golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= + golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= + golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +@@ -1018,10 +1016,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ + golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -1131,6 +1128,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= ++golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= + golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= + golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -1237,8 +1235,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv + google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= + google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= + google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +-google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= ++google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= ++google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= + google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +@@ -1306,14 +1304,14 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc + google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= + google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= + google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -1340,11 +1338,11 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ + google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= ++google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= + google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= + google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/grpc/cmd/protoc-gen-go-grpc 
v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +diff --git a/pkg/kubelet/cri/remote/remote_image.go b/pkg/kubelet/cri/remote/remote_image.go +index 1deff550fd8..29dd4b479c2 100644 +--- a/pkg/kubelet/cri/remote/remote_image.go ++++ b/pkg/kubelet/cri/remote/remote_image.go +@@ -60,6 +60,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletTracing) { + tracingOpts := []otelgrpc.Option{ ++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), + otelgrpc.WithPropagators(tracing.Propagators()), + otelgrpc.WithTracerProvider(tp), + } +diff --git a/pkg/kubelet/cri/remote/remote_runtime.go b/pkg/kubelet/cri/remote/remote_runtime.go +index 18e6bf7275f..8e4da10dca1 100644 +--- a/pkg/kubelet/cri/remote/remote_runtime.go ++++ b/pkg/kubelet/cri/remote/remote_runtime.go +@@ -86,6 +86,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletTracing) { + tracingOpts := []otelgrpc.Option{ ++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents), + otelgrpc.WithPropagators(tracing.Propagators()), + otelgrpc.WithTracerProvider(tp), + } +diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod +index fa246877853..4d84bb0b5ca 100644 +--- a/staging/src/k8s.io/api/go.mod ++++ b/staging/src/k8s.io/api/go.mod +@@ -6,22 +6,21 @@ go 1.20 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/apimachinery v0.0.0 + ) + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +- github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/api/go.sum b/staging/src/k8s.io/api/go.sum +index ade45657c1c..5904fd8520d 100644 +--- a/staging/src/k8s.io/api/go.sum ++++ b/staging/src/k8s.io/api/go.sum +@@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= + github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +@@ -16,21 +16,21 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -48,18 +48,15 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -68,7 +65,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh + golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= + golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= ++golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +@@ -107,7 +104,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod +index a9d6dd6268a..ce202b362ac 100644 +--- a/staging/src/k8s.io/apiextensions-apiserver/go.mod ++++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod +@@ -9,18 +9,18 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/google/cel-go v0.12.7 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 
+- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 + go.etcd.io/etcd/client/v3 v3.5.7 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a +- google.golang.org/grpc v1.56.3 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.0.0 +@@ -43,7 +43,7 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect +@@ -52,7 +52,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect +@@ -65,7 +65,7 @@ require ( + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect +@@ -94,21 +94,20 @@ require ( + go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/raft/v3 v3.5.7 // indirect + go.etcd.io/etcd/server/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 
v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect +@@ -116,8 +115,8 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum +index dee8827b571..d6ac2f95e6f 100644 +--- a/staging/src/k8s.io/apiextensions-apiserver/go.sum ++++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform 
v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod 
h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod 
h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap 
v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= 
++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod 
h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= 
++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -178,13 +176,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= + github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 
v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -194,13 +191,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +@@ -228,13 +219,10 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -258,8 +246,8 @@ 
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -280,9 +268,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -330,9 +317,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -349,8 +335,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -362,8 +348,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -393,8 +379,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -461,8 +447,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -472,7 +458,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -490,8 +475,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -522,27 +508,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= 
+-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +@@ -625,7 +608,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + 
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -638,10 +620,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -690,7 +671,6 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -710,7 +690,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -827,13 +806,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + 
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -847,11 +825,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -864,7 +839,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod +index 56bf795f689..0b5dc2e72cc 100644 +--- a/staging/src/k8s.io/apimachinery/go.mod ++++ b/staging/src/k8s.io/apimachinery/go.mod +@@ -11,14 +11,14 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.4 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/moby/spdystream v0.2.0 + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f + github.com/onsi/ginkgo/v2 v2.9.1 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.24.0 + golang.org/x/time v0.3.0 + gopkg.in/inf.v0 v0.9.1 +@@ -31,7 +31,7 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +@@ -39,13 +39,14 @@ require ( + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +- github.com/kr/pretty v0.3.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/gomega v1.27.4 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.1 // indirect +diff --git a/staging/src/k8s.io/apimachinery/go.sum b/staging/src/k8s.io/apimachinery/go.sum +index 2740671c8e1..48dacc0868d 100644 +--- a/staging/src/k8s.io/apimachinery/go.sum ++++ b/staging/src/k8s.io/apimachinery/go.sum +@@ -20,8 +20,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -53,15 +53,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +@@ -70,11 +70,10 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -96,13 +95,15 @@ github.com/onsi/ginkgo/v2 v2.9.1 
h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= + github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= + github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= + github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -113,8 +114,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -197,11 +199,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod +index ee17e124eed..cb38b8243cd 100644 +--- a/staging/src/k8s.io/apiserver/go.mod ++++ b/staging/src/k8s.io/apiserver/go.mod +@@ -14,31 +14,31 @@ require ( + github.com/gogo/protobuf v1.3.2 + github.com/google/cel-go v0.12.7 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + go.etcd.io/etcd/api/v3 v3.5.7 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 + go.etcd.io/etcd/client/v3 v3.5.7 + go.etcd.io/etcd/server/v3 v3.5.7 +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 + go.uber.org/zap v1.19.0 + golang.org/x/crypto v0.22.0 + golang.org/x/net v0.24.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.19.0 + golang.org/x/time v0.3.0 +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a +- google.golang.org/grpc v1.56.3 ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ++ google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.33.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/square/go-jose.v2 v2.6.0 +@@ -62,12 +62,12 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect +@@ -79,7 +79,7 @@ require ( + github.com/gorilla/websocket v1.4.2 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jonboulle/clockwork 
v0.2.2 // indirect +@@ -107,18 +107,17 @@ require ( + go.etcd.io/etcd/client/v2 v2.305.7 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/raft/v3 v3.5.7 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/apiserver/go.sum b/staging/src/k8s.io/apiserver/go.sum +index 1a4f94bb196..5843da35d62 100644 +--- a/staging/src/k8s.io/apiserver/go.sum ++++ b/staging/src/k8s.io/apiserver/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod 
h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= 
++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= 
+-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod 
h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod 
h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite 
v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod 
h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -178,13 +176,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/certifi/gocertifi 
v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= + github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -194,13 +191,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +@@ -229,13 +220,10 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= 
++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +@@ -258,8 +246,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -280,9 +268,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -330,9 +317,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz 
v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -349,8 +335,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -362,8 +348,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -393,8 +379,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -462,8 +448,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -473,7 +459,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -492,8 +477,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -523,27 +509,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod 
h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +@@ -625,7 +608,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -638,10 +620,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -690,7 +671,6 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -710,7 +690,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -826,13 +805,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -846,11 +824,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= 
++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
+ google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+@@ -863,7 +838,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
+ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+ google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go
+index 67a1790c56a..fb9ca9af683 100644
+--- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go
++++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/traces.go
+@@ -20,6 +20,7 @@ import (
+ "net/http"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+
+ tracing "k8s.io/component-base/tracing"
+@@ -32,7 +33,16 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler {
+ otelhttp.WithPublicEndpoint(),
+ otelhttp.WithTracerProvider(tp),
+ }
++ wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ // Add the http.target attribute to the otelhttp span
++ // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743
++ if r.URL != nil {
++ trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI()))
++ }
++ handler.ServeHTTP(w, r)
++ })
++
+ // With Noop TracerProvider, the otelhttp still handles context propagation.
+ // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
+- return otelhttp.NewHandler(handler, "KubernetesAPI", opts...)
++ return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...)
+ }
+diff --git a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
+index 64bcabadb97..185124c6bfd 100644
+--- a/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
++++ b/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
+@@ -299,6 +299,7 @@ var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, e
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
+ tracingOpts := []otelgrpc.Option{
++ otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
+ otelgrpc.WithPropagators(tracing.Propagators()),
+ otelgrpc.WithTracerProvider(c.TracerProvider),
+ }
+diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod
+index 141883dff20..d5dd87538d9 100644
+--- a/staging/src/k8s.io/cli-runtime/go.mod
++++ b/staging/src/k8s.io/cli-runtime/go.mod
+@@ -8,11 +8,11 @@ require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/evanphx/json-patch v4.12.0+incompatible
+ github.com/google/gnostic v0.5.7-v3refs
+- github.com/google/uuid v1.3.0
++ github.com/google/uuid v1.3.1
+ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
+ github.com/spf13/cobra v1.6.0
+ github.com/spf13/pflag v1.0.5
+- github.com/stretchr/testify v1.8.1
++ github.com/stretchr/testify v1.8.4
+ golang.org/x/text v0.14.0
+ gopkg.in/yaml.v2 v2.4.0
+ k8s.io/api v0.0.0
+@@ -29,14 +29,14 @@ require (
+ github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+ github.com/go-errors/errors v1.4.2 // indirect
+- github.com/go-logr/logr v1.2.3 // indirect
++ github.com/go-logr/logr v1.3.0 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.1 // indirect
+ github.com/go-openapi/swag v0.22.3 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/btree v1.0.1 // indirect
+- github.com/google/go-cmp v0.5.9 // indirect
++ github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/gofuzz v1.1.0 // indirect
+ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
+@@ -55,7 +55,7 @@ require (
+ github.com/xlab/treeprint v1.1.0 // indirect
+ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
+ golang.org/x/net v0.24.0 // indirect
+- golang.org/x/oauth2 v0.7.0 // indirect
++ golang.org/x/oauth2 v0.11.0 // indirect
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum
+index 2f2d42d0927..1471811fd2f 100644
+--- a/staging/src/k8s.io/cli-runtime/go.sum
++++ b/staging/src/k8s.io/cli-runtime/go.sum
+@@ -1,5 +1,6 @@
+ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +@@ -24,8 +25,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= + github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -60,16 +61,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +@@ -84,8 +85,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod 
h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -115,8 +116,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= + github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +@@ -134,8 +135,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -165,8 +167,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git 
a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod +index 355ad6283cd..727df9974c4 100644 +--- a/staging/src/k8s.io/client-go/go.mod ++++ b/staging/src/k8s.io/client-go/go.mod +@@ -11,16 +11,16 @@ require ( + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/protobuf v1.5.4 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 +- github.com/google/uuid v1.3.0 ++ github.com/google/uuid v1.3.1 + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 + github.com/imdario/mergo v0.3.6 + github.com/peterbourgon/diskv v2.0.1+incompatible + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.24.0 +- golang.org/x/oauth2 v0.7.0 ++ golang.org/x/oauth2 v0.11.0 + golang.org/x/term v0.19.0 + golang.org/x/time v0.3.0 + google.golang.org/protobuf v1.33.0 +@@ -36,7 +36,7 @@ require ( + + require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +diff --git a/staging/src/k8s.io/client-go/go.sum b/staging/src/k8s.io/client-go/go.sum +index 7a9da85e99e..c6502dde8c7 100644 +--- a/staging/src/k8s.io/client-go/go.sum ++++ b/staging/src/k8s.io/client-go/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +@@ -19,8 +20,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -57,15 +58,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +@@ -79,8 +80,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -109,8 +110,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -121,8 +122,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -148,8 +150,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod +index d5b41fbaf14..53ba2d70c2d 100644 +--- a/staging/src/k8s.io/cloud-provider/go.mod ++++ b/staging/src/k8s.io/cloud-provider/go.mod +@@ -6,10 +6,10 @@ go 1.20 + + require ( + github.com/davecgh/go-spew v1.1.1 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -28,7 +28,7 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect +@@ -36,7 +36,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + 
github.com/go-openapi/jsonpointer v0.19.6 // indirect +@@ -48,9 +48,9 @@ require ( + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -72,32 +72,31 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/cloud-provider/go.sum b/staging/src/k8s.io/cloud-provider/go.sum +index 320cef02808..ed079bd1636 100644 
+--- a/staging/src/k8s.io/cloud-provider/go.sum ++++ b/staging/src/k8s.io/cloud-provider/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod 
h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod 
h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod 
h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod 
h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod 
h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +@@ -159,7 +158,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -179,11 +177,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -193,13 +190,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -222,20 +213,16 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -249,8 +236,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= 
+-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -271,9 +258,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -321,9 +307,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -339,9 +324,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -353,8 +337,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -384,8 +368,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -452,8 +436,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -462,7 +446,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + 
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -480,8 +463,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -511,27 +495,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 
h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -609,7 +590,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -621,10 +601,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -671,9 +650,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -691,7 +668,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -798,7 +774,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -806,13 +781,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -825,12 +799,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -843,7 +813,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -860,7 +829,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 + gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod +index ca7c6125322..10113a8227d 100644 +--- a/staging/src/k8s.io/cluster-bootstrap/go.mod ++++ b/staging/src/k8s.io/cluster-bootstrap/go.mod +@@ -5,7 +5,7 @@ module k8s.io/cluster-bootstrap + go 1.20 + + require ( +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + gopkg.in/square/go-jose.v2 v2.6.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -14,7 +14,7 @@ require ( + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +diff --git a/staging/src/k8s.io/cluster-bootstrap/go.sum b/staging/src/k8s.io/cluster-bootstrap/go.sum +index 5f66c23975a..342a6b62976 100644 +--- a/staging/src/k8s.io/cluster-bootstrap/go.sum ++++ b/staging/src/k8s.io/cluster-bootstrap/go.sum +@@ -4,8 +4,8 @@ github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= + github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +@@ -14,20 +14,20 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -43,18 +43,15 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -104,7 +101,6 @@ gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod +index cf207828346..a7307cb83f9 100644 +--- a/staging/src/k8s.io/code-generator/examples/go.mod ++++ b/staging/src/k8s.io/code-generator/examples/go.mod +@@ -16,16 +16,16 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ 
github.com/google/uuid v1.3.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect +@@ -34,7 +34,7 @@ require ( + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/code-generator/examples/go.sum b/staging/src/k8s.io/code-generator/examples/go.sum +index e05b3967f17..a2b9fd9568e 100644 +--- a/staging/src/k8s.io/code-generator/examples/go.sum ++++ b/staging/src/k8s.io/code-generator/examples/go.sum +@@ -14,8 +14,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -47,14 +47,14 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +@@ 
-63,7 +63,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -84,7 +84,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +@@ -94,8 +94,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -119,8 +119,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod +index b940331fc2d..ce70f911da9 100644 +--- a/staging/src/k8s.io/code-generator/go.mod ++++ b/staging/src/k8s.io/code-generator/go.mod +@@ -16,22 +16,24 @@ require ( + + require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +- github.com/kr/pretty v0.3.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.9.1 // indirect + github.com/onsi/gomega v1.27.4 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect ++ github.com/stretchr/testify v1.8.4 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect +diff --git a/staging/src/k8s.io/code-generator/go.sum b/staging/src/k8s.io/code-generator/go.sum +index 3cffe3907d6..ef123248675 100644 +--- a/staging/src/k8s.io/code-generator/go.sum ++++ b/staging/src/k8s.io/code-generator/go.sum +@@ -18,8 +18,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -51,8 +51,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -66,11 +66,10 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -88,11 +87,13 @@ github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= + github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= + github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= + github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -103,8 +104,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -187,11 +189,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod +index c86548d2b85..a5840655dba 100644 +--- a/staging/src/k8s.io/component-base/go.mod ++++ b/staging/src/k8s.io/component-base/go.mod +@@ -6,9 +6,9 @@ go 1.20 + + require ( + github.com/blang/semver/v4 v4.0.0 +- github.com/go-logr/logr v1.2.3 ++ github.com/go-logr/logr v1.3.0 + github.com/go-logr/zapr v1.2.3 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 +@@ -16,12 +16,12 @@ require ( + github.com/prometheus/procfs v0.8.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +- go.opentelemetry.io/otel v1.10.0 +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +- go.opentelemetry.io/otel/sdk v1.10.0 +- go.opentelemetry.io/otel/trace v1.10.0 ++ github.com/stretchr/testify v1.8.4 ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++ go.opentelemetry.io/otel v1.20.0 ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++ go.opentelemetry.io/otel/sdk v1.20.0 ++ go.opentelemetry.io/otel/trace v1.20.0 + go.uber.org/zap v1.19.0 + golang.org/x/sys v0.19.0 + k8s.io/apimachinery v0.0.0 +@@ -34,7 +34,7 @@ require ( + require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect +@@ -47,8 +47,8 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect 
+ github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -58,23 +58,20 @@ require ( + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect +- go.uber.org/goleak v1.2.1 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/component-base/go.sum b/staging/src/k8s.io/component-base/go.sum +index 5d1a8dd9acf..34e41e5c47c 100644 +--- a/staging/src/k8s.io/component-base/go.sum ++++ b/staging/src/k8s.io/component-base/go.sum +@@ -13,148 +13,31 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine 
v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= + cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +-cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +-cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= 
+-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + 
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -171,11 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + 
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -185,13 +67,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +@@ -206,17 +82,13 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch 
v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -230,8 +102,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -249,9 +121,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -296,9 +167,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -314,15 +184,13 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -349,8 +217,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -415,13 +283,12 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
+ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= + github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -438,8 +305,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -449,25 +317,22 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 
h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -544,7 +409,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net 
v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -556,10 +420,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -570,7 +433,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -605,9 +468,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ 
-624,7 +485,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -731,7 +591,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -739,13 +598,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d 
h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -758,12 +616,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -776,7 +630,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -790,7 +643,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod +index 61922b53a3a..6307b4654c7 100644 +--- a/staging/src/k8s.io/component-helpers/go.mod ++++ b/staging/src/k8s.io/component-helpers/go.mod +@@ -5,7 +5,7 @@ module 
k8s.io/component-helpers + go 1.20 + + require ( +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -17,7 +17,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +@@ -25,7 +25,7 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect +@@ -34,7 +34,7 @@ require ( + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/component-helpers/go.sum b/staging/src/k8s.io/component-helpers/go.sum +index c94f2cf9acf..0a92870d196 100644 +--- a/staging/src/k8s.io/component-helpers/go.sum ++++ b/staging/src/k8s.io/component-helpers/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +@@ -18,8 +19,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -54,15 +55,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + 
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +@@ -73,8 +74,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -101,8 +102,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -113,8 +114,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -140,8 +142,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod +index efee82f539b..6f085862f30 100644 +--- a/staging/src/k8s.io/controller-manager/go.mod ++++ b/staging/src/k8s.io/controller-manager/go.mod +@@ -6,8 +6,8 @@ go 1.20 + + require ( + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 +- golang.org/x/oauth2 v0.7.0 ++ github.com/stretchr/testify v1.8.4 ++ golang.org/x/oauth2 v0.11.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -23,7 +23,7 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect +@@ -32,7 +32,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer 
v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect +@@ -42,11 +42,11 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -68,16 +68,15 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect +@@ -89,10 +88,10 @@ require ( + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/controller-manager/go.sum b/staging/src/k8s.io/controller-manager/go.sum +index 77e8f2add3e..e4058994ae0 100644 +--- a/staging/src/k8s.io/controller-manager/go.sum ++++ 
b/staging/src/k8s.io/controller-manager/go.sum +@@ -13,151 +13,149 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= 
++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= 
+-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore 
v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache 
v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod 
h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -177,11 +175,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -191,13 +188,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 
h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -218,20 +209,16 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -245,8 +232,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -267,9 +254,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -317,9 +303,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -335,9 +320,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -349,8 +333,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -380,8 +364,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -447,8 +431,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -457,7 +441,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux 
v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -475,8 +458,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -506,27 +490,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= 
+-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -604,7 +585,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -616,10 +596,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -666,9 +645,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -685,7 +662,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -792,7 +768,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -800,13 +775,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -819,12 +793,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 
+-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -837,7 +807,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -854,7 +823,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 + gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod +index 94a8857274a..e49e7cb7e2a 100644 +--- a/staging/src/k8s.io/cri-api/go.mod ++++ b/staging/src/k8s.io/cri-api/go.mod +@@ -6,19 +6,21 @@ go 1.20 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 +- google.golang.org/grpc v1.56.3 ++ github.com/stretchr/testify v1.8.4 ++ google.golang.org/grpc v1.59.0 + ) + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/kr/pretty v0.3.0 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum +index 
3ff639816c6..0af9b490a50 100644 +--- a/staging/src/k8s.io/cri-api/go.sum ++++ b/staging/src/k8s.io/cri-api/go.sum +@@ -1,44 +1,40 @@ +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -54,10 +50,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -78,18 +75,16 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod +index 795791c68c3..0bbc9874d5c 100644 +--- a/staging/src/k8s.io/csi-translation-lib/go.mod ++++ b/staging/src/k8s.io/csi-translation-lib/go.mod +@@ -5,7 +5,7 @@ module k8s.io/csi-translation-lib + go 1.20 + + require ( +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/klog/v2 v2.90.1 +@@ -13,10 +13,10 @@ require ( + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect +diff --git a/staging/src/k8s.io/csi-translation-lib/go.sum b/staging/src/k8s.io/csi-translation-lib/go.sum +index 74cd86cda2a..a4fd440d5c3 100644 +--- 
a/staging/src/k8s.io/csi-translation-lib/go.sum ++++ b/staging/src/k8s.io/csi-translation-lib/go.sum +@@ -4,8 +4,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= + github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +@@ -14,21 +14,21 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -44,18 +44,15 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -102,7 +99,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= + gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod +index 2dd4044f0c4..316df041696 100644 +--- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod ++++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod +@@ -5,10 +5,10 @@ module k8s.io/dynamic-resource-allocation + go 1.20 + + require ( +- github.com/go-logr/logr v1.2.3 +- github.com/google/go-cmp v0.5.9 +- github.com/stretchr/testify v1.8.1 +- google.golang.org/grpc v1.56.3 ++ github.com/go-logr/logr v1.3.0 ++ github.com/google/go-cmp v0.6.0 ++ github.com/stretchr/testify v1.8.4 ++ google.golang.org/grpc v1.59.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -28,7 
+28,7 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect +@@ -38,13 +38,13 @@ require ( + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.sum b/staging/src/k8s.io/dynamic-resource-allocation/go.sum +index ac43c3d716f..d59f7fd4154 100644 +--- a/staging/src/k8s.io/dynamic-resource-allocation/go.sum ++++ b/staging/src/k8s.io/dynamic-resource-allocation/go.sum +@@ -1,5 +1,5 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +@@ -21,14 +21,14 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 + github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= + github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 
h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -40,7 +40,7 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +@@ -65,15 +65,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +@@ -85,8 +85,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -118,8 +118,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +@@ -131,8 +131,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -158,13 +159,14 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -203,14 +205,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod +index 525ed6edbe9..e97532a49b7 100644 +--- a/staging/src/k8s.io/kms/go.mod ++++ b/staging/src/k8s.io/kms/go.mod +@@ -6,20 +6,20 @@ go 1.20 + + require ( + github.com/gogo/protobuf v1.3.2 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/grpc v1.59.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 + k8s.io/klog/v2 v2.90.1 + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // 
indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect + ) +diff --git a/staging/src/k8s.io/kms/go.sum b/staging/src/k8s.io/kms/go.sum +index b662445c9fb..16acdd5bb68 100644 +--- a/staging/src/k8s.io/kms/go.sum ++++ b/staging/src/k8s.io/kms/go.sum +@@ -1,4 +1,4 @@ +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +@@ -7,36 +7,36 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XP + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= + github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + 
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= + github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +@@ -48,8 +48,9 @@ github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3 + github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -65,10 +66,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= 
++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +@@ -91,11 +93,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/mock/go.mod +index 5a72400bf3a..8b392d26535 100644 +--- a/staging/src/k8s.io/kms/internal/plugins/mock/go.mod ++++ b/staging/src/k8s.io/kms/internal/plugins/mock/go.mod +@@ -8,15 +8,15 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect +- google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + k8s.io/client-go v0.0.0 // indirect + k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect +diff --git a/staging/src/k8s.io/kms/internal/plugins/mock/go.sum b/staging/src/k8s.io/kms/internal/plugins/mock/go.sum +index 843c0509d14..ed48752b16f 100644 +--- a/staging/src/k8s.io/kms/internal/plugins/mock/go.sum ++++ b/staging/src/k8s.io/kms/internal/plugins/mock/go.sum +@@ -1,11 +1,11 @@ + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -43,10 +43,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod +index 17e4d301411..21aa8b0b473 100644 +--- a/staging/src/k8s.io/kube-aggregator/go.mod ++++ b/staging/src/k8s.io/kube-aggregator/go.mod +@@ 
-8,11 +8,11 @@ require ( + github.com/davecgh/go-spew v1.1.1 + github.com/emicklei/go-restful/v3 v3.9.0 + github.com/gogo/protobuf v1.3.2 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.1.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.24.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -32,14 +32,14 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect +@@ -48,9 +48,9 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -72,22 +72,21 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + 
golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.14.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect +@@ -95,10 +94,10 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum +index dd3f900b3b6..4cfa4400ab9 100644 +--- a/staging/src/k8s.io/kube-aggregator/go.sum ++++ b/staging/src/k8s.io/kube-aggregator/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 
h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod 
h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod 
h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= 
++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= 
++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod 
h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod 
h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -178,11 +176,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -192,13 +189,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -219,20 +210,16 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch 
v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -247,8 +234,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -269,9 +256,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -319,9 +305,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -337,9 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -351,8 +335,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -382,8 +366,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -450,8 +434,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -460,7 +444,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -478,8 +461,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -510,27 +494,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= 
+-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= 
++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -609,7 +590,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -621,10 +601,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -671,9 +650,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -690,7 +667,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -798,7 +774,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -806,13 +781,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto 
v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -825,12 +799,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -843,7 +813,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -860,7 +829,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 + gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod +index 52f6c10f479..8d029ca0cfe 100644 +--- a/staging/src/k8s.io/kube-controller-manager/go.mod ++++ b/staging/src/k8s.io/kube-controller-manager/go.mod +@@ -11,14 +11,13 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect +- github.com/rogpeppe/go-internal v1.12.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/text v0.14.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect +diff --git a/staging/src/k8s.io/kube-controller-manager/go.sum b/staging/src/k8s.io/kube-controller-manager/go.sum +index 181f0fb60e9..f737c385499 100644 +--- a/staging/src/k8s.io/kube-controller-manager/go.sum ++++ b/staging/src/k8s.io/kube-controller-manager/go.sum +@@ -5,7 +5,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +@@ -18,8 +18,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +@@ -32,15 +32,15 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/cel-go v0.12.7/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= + github.com/google/gnostic v0.5.7-v3refs/go.mod 
h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +@@ -49,8 +49,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -76,33 +76,31 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= + go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= + go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -111,14 +109,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh + golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= + golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= ++golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -143,10 +141,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod +index 05ed8ac6286..8ab1d23e37d 100644 +--- a/staging/src/k8s.io/kube-proxy/go.mod ++++ b/staging/src/k8s.io/kube-proxy/go.mod +@@ 
-10,15 +10,14 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect +- github.com/rogpeppe/go-internal v1.12.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/text v0.14.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect +diff --git a/staging/src/k8s.io/kube-proxy/go.sum b/staging/src/k8s.io/kube-proxy/go.sum +index 5d1087839fe..26b5a190ba3 100644 +--- a/staging/src/k8s.io/kube-proxy/go.sum ++++ b/staging/src/k8s.io/kube-proxy/go.sum +@@ -2,7 +2,7 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +@@ -12,8 +12,8 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +@@ -24,14 +24,14 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +@@ -39,8 +39,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -65,28 +65,26 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark 
v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -95,14 +93,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh + golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= + golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= ++golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +-golang.org/x/oauth2 v0.7.0/go.mod 
h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -126,10 +124,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod +index 4f262e8f105..d6bf110c59b 100644 +--- a/staging/src/k8s.io/kube-scheduler/go.mod ++++ b/staging/src/k8s.io/kube-scheduler/go.mod +@@ -5,7 +5,7 @@ module k8s.io/kube-scheduler + go 1.20 + + require ( +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/component-base v0.0.0 +@@ -13,7 +13,7 @@ require ( + ) + + require ( +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +diff --git a/staging/src/k8s.io/kube-scheduler/go.sum b/staging/src/k8s.io/kube-scheduler/go.sum +index 07fa10c48cd..cf02e87c864 100644 +--- a/staging/src/k8s.io/kube-scheduler/go.sum ++++ b/staging/src/k8s.io/kube-scheduler/go.sum +@@ -2,7 +2,7 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +@@ -11,8 +11,8 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +@@ -23,22 +23,22 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= + github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= + github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= + github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= + github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +@@ -61,28 +61,26 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= 
++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -98,7 +96,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -122,10 +120,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod +index 397571a012d..abe22084044 100644 +--- a/staging/src/k8s.io/kubectl/go.mod ++++ b/staging/src/k8s.io/kubectl/go.mod +@@ -15,7 +15,7 @@ require ( + github.com/fvbommel/sortorder v1.0.1 + 
github.com/go-openapi/jsonreference v0.20.1 + github.com/google/gnostic v0.5.7-v3refs +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/jonboulle/clockwork v0.2.2 + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/lithammer/dedent v1.1.0 +@@ -27,7 +27,7 @@ require ( + github.com/russross/blackfriday/v2 v2.1.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + golang.org/x/sys v0.19.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.0.0 +@@ -52,7 +52,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect +@@ -62,7 +62,7 @@ require ( + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect +@@ -81,7 +81,7 @@ require ( + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect +diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum +index f3912424968..cebe9ec2dbf 100644 +--- a/staging/src/k8s.io/kubectl/go.sum ++++ b/staging/src/k8s.io/kubectl/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +@@ -11,7 +12,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +@@ -48,8 +49,8 @@ github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui72 + github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= + github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +@@ -92,8 +93,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -101,12 +102,12 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +@@ -122,8 +123,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -170,8 +171,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: + github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= + github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +@@ -190,26 +191,25 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel 
v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +@@ -235,8 +235,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -284,13 +284,12 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod +index 26c1b865356..c72b6ec69c4 100644 +--- a/staging/src/k8s.io/kubelet/go.mod ++++ b/staging/src/k8s.io/kubelet/go.mod +@@ -6,7 +6,7 @@ go 1.20 + + require ( + github.com/gogo/protobuf v1.3.2 +- google.golang.org/grpc v1.56.3 ++ google.golang.org/grpc v1.59.0 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/component-base v0.0.0 +@@ -16,9 +16,9 @@ require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -34,7 +34,7 @@ require ( + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +diff --git a/staging/src/k8s.io/kubelet/go.sum b/staging/src/k8s.io/kubelet/go.sum +index 83f346db617..98ee2bad27c 100644 +--- a/staging/src/k8s.io/kubelet/go.sum ++++ b/staging/src/k8s.io/kubelet/go.sum +@@ -19,7 +19,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + 
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +@@ -48,7 +48,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +@@ -70,9 +70,9 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +@@ -87,8 +87,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V + github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
+ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +@@ -100,7 +100,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a + github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= + github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -141,8 +141,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -157,10 +157,10 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -183,8 +183,8 @@ 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv + github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -240,8 +240,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 + github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -255,8 +255,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -266,17 +266,15 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod 
h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +@@ -358,7 +356,7 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -369,7 +367,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -526,10 +524,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc + google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -542,8 +540,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go +index dc6a1a16460..fc27a8cf83c 100644 +--- 
a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go ++++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_blobDiskController_test.go +@@ -354,7 +354,7 @@ func TestCreateBlobDisk(t *testing.T) { + }, + }, nil) + diskURI, err := b.CreateBlobDisk("datadisk", storage.StandardGRS, 10) +- expectedErr := "failed to put page blob datadisk.vhd in container vhds: storage: service returned error: StatusCode=403" ++ expectedErr := "failed to put page blob datadisk.vhd in container vhds" + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), expectedErr)) + assert.Empty(t, diskURI) +diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod +index 99e854b4aa1..da877d7193f 100644 +--- a/staging/src/k8s.io/legacy-cloud-providers/go.mod ++++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod +@@ -12,13 +12,13 @@ require ( + github.com/Azure/go-autorest/autorest/mocks v0.4.2 + github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b + github.com/golang/mock v1.6.0 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + github.com/vmware/govmomi v0.30.6 + golang.org/x/crypto v0.22.0 +- golang.org/x/oauth2 v0.7.0 +- google.golang.org/api v0.114.0 ++ golang.org/x/oauth2 v0.11.0 ++ google.golang.org/api v0.126.0 + gopkg.in/gcfg.v1 v1.2.3 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 +@@ -31,7 +31,7 @@ require ( + ) + + require ( +- cloud.google.com/go/compute v1.19.1 // indirect ++ cloud.google.com/go/compute v1.23.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect +@@ -45,7 +45,7 @@ require ( + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +@@ -56,9 +56,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/s2a-go v0.1.4 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect +- github.com/googleapis/gax-go/v2 v2.7.1 // indirect ++ github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect +@@ -80,8 +81,8 @@ require ( + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/warnings.v0 
v0.1.1 // indirect +diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.sum b/staging/src/k8s.io/legacy-cloud-providers/go.sum +index ed2e97b0203..352ccd43ede 100644 +--- a/staging/src/k8s.io/legacy-cloud-providers/go.sum ++++ b/staging/src/k8s.io/legacy-cloud-providers/go.sum +@@ -24,22 +24,20 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc + cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= + cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= + cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= + cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= ++cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +@@ -95,7 +93,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= + github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +@@ -110,8 +108,12 @@ 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= + github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= ++github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= ++github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +@@ -132,9 +134,10 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y + github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +@@ -153,8 +156,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= 
++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +@@ -176,7 +179,7 @@ github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -230,8 +233,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -255,21 +258,23 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= ++github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= ++github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= + github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= + github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= + github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +-github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= ++github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= ++github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -296,8 +301,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -363,8 +368,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua + github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= + github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= + github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +@@ -384,10 +389,12 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= ++github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= + github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= + github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +@@ -396,6 +403,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de + github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= ++github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= + go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= + go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= + go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +@@ -408,19 +416,17 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= + go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= + go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= + go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod 
h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= + go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= + go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= + go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +@@ -431,6 +437,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= + golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= ++golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= + golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= + golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= + golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +@@ -468,6 +475,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= + golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= + golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +@@ -510,6 +518,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= ++golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +@@ -529,8 +538,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -542,6 +551,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ++golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= + golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -600,6 +611,8 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc + golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ++golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= + golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +@@ -615,6 +628,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + 
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= ++golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= + golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +@@ -675,6 +689,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= + golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= ++golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= + golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= + golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +@@ -712,8 +727,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv + google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= + google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= + google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +-google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= ++google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= ++google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= + google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +@@ -780,12 +795,13 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc + google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= + google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= + google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod 
h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -811,8 +827,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ + google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= + google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod +index 9f9d7331369..1595b4a2093 100644 +--- a/staging/src/k8s.io/metrics/go.mod ++++ b/staging/src/k8s.io/metrics/go.mod +@@ -6,7 +6,7 @@ go 1.20 + + require ( + github.com/gogo/protobuf v1.3.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 +@@ -17,15 +17,15 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect +@@ -37,7 +37,7 @@ require ( + github.com/spf13/pflag v1.0.5 // indirect + 
golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/metrics/go.sum b/staging/src/k8s.io/metrics/go.sum +index 3e1ff947687..74f4362bed2 100644 +--- a/staging/src/k8s.io/metrics/go.sum ++++ b/staging/src/k8s.io/metrics/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +@@ -19,8 +20,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -55,15 +56,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +@@ -74,8 +75,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -102,8 +103,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -114,8 +115,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -143,8 +145,8 @@ golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/staging/src/k8s.io/mount-utils/go.mod b/staging/src/k8s.io/mount-utils/go.mod +index a9da80f2ef5..c3890d44340 100644 +--- a/staging/src/k8s.io/mount-utils/go.mod ++++ b/staging/src/k8s.io/mount-utils/go.mod +@@ -6,16 +6,17 @@ go 1.20 + + require ( + github.com/moby/sys/mountinfo v0.6.2 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/klog/v2 v2.90.1 + k8s.io/utils v0.0.0-20230209194617-a36077c30491 + ) + + require ( + github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/go-logr/logr v1.2.3 // indirect +- github.com/kr/pretty v0.3.0 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect ++ github.com/kr/pretty v0.3.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ++ github.com/rogpeppe/go-internal v1.10.0 // indirect + golang.org/x/sys v0.19.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +diff --git a/staging/src/k8s.io/mount-utils/go.sum b/staging/src/k8s.io/mount-utils/go.sum +index 7222a481fa4..d55ca4a531d 100644 +--- a/staging/src/k8s.io/mount-utils/go.sum ++++ b/staging/src/k8s.io/mount-utils/go.sum +@@ -1,40 +1,33 @@ + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= + github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= + github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= + github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= ++github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= ++github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= + golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= + gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod +index e1625ac2960..b163f3d661e 100644 +--- a/staging/src/k8s.io/pod-security-admission/go.mod ++++ b/staging/src/k8s.io/pod-security-admission/go.mod +@@ -6,10 +6,10 @@ go 1.20 + + require ( + github.com/blang/semver/v4 v4.0.0 +- github.com/google/go-cmp v0.5.9 ++ github.com/google/go-cmp v0.6.0 + github.com/spf13/cobra v1.6.0 + github.com/spf13/pflag 
v1.0.5 +- github.com/stretchr/testify v1.8.1 ++ github.com/stretchr/testify v1.8.4 + k8s.io/api v0.0.0 + k8s.io/apimachinery v0.0.0 + k8s.io/apiserver v0.0.0 +@@ -25,7 +25,7 @@ require ( + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect +@@ -34,7 +34,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect +@@ -46,9 +46,9 @@ require ( + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -69,32 +69,31 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + 
golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/pod-security-admission/go.sum b/staging/src/k8s.io/pod-security-admission/go.sum +index 8cc31d20277..dcbeb8e3023 100644 +--- a/staging/src/k8s.io/pod-security-admission/go.sum ++++ b/staging/src/k8s.io/pod-security-admission/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod 
h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod 
h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod 
h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= ++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= 
++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= ++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod 
h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis 
v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod 
h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -178,11 +176,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -192,13 +189,7 @@ 
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -219,20 +210,16 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + 
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -246,8 +233,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG + github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -268,9 +255,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -318,9 +304,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -336,9 +321,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -350,8 +334,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -381,8 +365,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -448,8 +432,8 @@ github.com/prometheus/procfs v0.8.0 
h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -458,7 +442,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -476,8 +459,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -507,27 +491,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= 
+-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= ++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod 
h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -605,7 +586,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -617,10 +597,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -667,9 +646,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -686,7 +663,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -793,7 +769,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -801,13 +776,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= 
++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -820,12 +794,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -838,7 +808,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -855,7 +824,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 + gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git 
a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod +index 34a8604c4b0..3f5f3c64fc2 100644 +--- a/staging/src/k8s.io/sample-apiserver/go.mod ++++ b/staging/src/k8s.io/sample-apiserver/go.mod +@@ -23,7 +23,7 @@ require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect +- github.com/cenkalti/backoff/v4 v4.1.3 // indirect ++ github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect +@@ -32,7 +32,7 @@ require ( + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect +@@ -42,10 +42,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.12.7 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect +- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect ++ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/josharian/intern v1.0.0 // indirect +@@ -66,23 +66,22 @@ require ( + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect +- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect +- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect +- go.opentelemetry.io/otel v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect +- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect +- go.opentelemetry.io/otel/metric v0.31.0 // indirect +- go.opentelemetry.io/otel/sdk v1.10.0 // indirect +- go.opentelemetry.io/otel/trace v1.10.0 // indirect +- go.opentelemetry.io/proto/otlp v0.19.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect ++ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect ++ go.opentelemetry.io/otel v1.20.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect ++ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect ++ go.opentelemetry.io/otel/metric v1.20.0 // indirect ++ go.opentelemetry.io/otel/sdk v1.20.0 // indirect ++ go.opentelemetry.io/otel/trace v1.20.0 // indirect ++ go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect 
+- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect +@@ -90,10 +89,10 @@ require ( + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + google.golang.org/appengine v1.6.7 // indirect +- google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect +- google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect +- google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect +- google.golang.org/grpc v1.56.3 // indirect ++ google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect ++ google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect +diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum +index 948637febb9..100ed20040b 100644 +--- a/staging/src/k8s.io/sample-apiserver/go.sum ++++ b/staging/src/k8s.io/sample-apiserver/go.sum +@@ -13,144 +13,143 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV + cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= + cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= + cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +-cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +-cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +-cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +-cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +-cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +-cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +-cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +-cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +-cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +-cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +-cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +-cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +-cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +-cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +-cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= ++cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= ++cloud.google.com/go v0.110.7/go.mod 
h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= ++cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= ++cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= ++cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= ++cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= ++cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= ++cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= ++cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= ++cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= ++cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= ++cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= ++cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= ++cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= ++cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= ++cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= ++cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= ++cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= + cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= + cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= + cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= + cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= + cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= + cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +-cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +-cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +-cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +-cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +-cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +-cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +-cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +-cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +-cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= ++cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= ++cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= ++cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= ++cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= ++cloud.google.com/go/channel v1.16.0/go.mod 
h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= ++cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= ++cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= ++cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= ++cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= ++cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= + cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= + cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +-cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +-cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +-cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +-cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +-cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +-cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +-cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +-cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +-cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +-cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +-cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= ++cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= ++cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= ++cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= ++cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= ++cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= ++cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= ++cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= ++cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= ++cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= ++cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= ++cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= + cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= + cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +-cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +-cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +-cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +-cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +-cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +-cloud.google.com/go/documentai v1.18.0/go.mod 
h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +-cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +-cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= ++cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= ++cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= ++cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= ++cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= ++cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= ++cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= ++cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= ++cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= + cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +-cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +-cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +-cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +-cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +-cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +-cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +-cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +-cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +-cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +-cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +-cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +-cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +-cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +-cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +-cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +-cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +-cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= ++cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= ++cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= ++cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= ++cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= ++cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= ++cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= ++cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= ++cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= 
++cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= ++cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= ++cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= ++cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= ++cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= ++cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= ++cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= ++cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= ++cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= + cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +-cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +-cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +-cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +-cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +-cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +-cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +-cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +-cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +-cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +-cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +-cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +-cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +-cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +-cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +-cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +-cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +-cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +-cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= ++cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= ++cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= ++cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= ++cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= ++cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= ++cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= ++cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= ++cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= 
++cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= ++cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= ++cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= ++cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= ++cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= ++cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= ++cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= ++cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= ++cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= ++cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= ++cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= + cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= + cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= + cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= + cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +-cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +-cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +-cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +-cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +-cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +-cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= ++cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= ++cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= ++cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= ++cloud.google.com/go/recommendationengine v0.8.1/go.mod 
h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= ++cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= ++cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= ++cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= ++cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= ++cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= ++cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= ++cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= ++cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= ++cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= ++cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= ++cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= ++cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= ++cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= ++cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= + cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= + cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= + cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= + cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= + cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= ++cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= ++cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= ++cloud.google.com/go/texttospeech v1.7.1/go.mod 
h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= ++cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= ++cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= ++cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= ++cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= ++cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= ++cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= ++cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= ++cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= ++cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= ++cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= ++cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= ++cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= + dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= + github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +@@ -158,7 +157,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= + github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= + github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +@@ -178,11 +176,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= + github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= + github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= ++github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= ++github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= + github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 
v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +@@ -192,13 +189,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +@@ -219,20 +210,16 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry + github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= ++github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +-github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= ++github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= ++github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= + github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/felixge/httpsnoop v1.0.3 
h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= + github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= + github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= + github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= + github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +@@ -247,8 +234,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= + github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= + github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= + github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +@@ -269,9 +256,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 + github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= + github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= ++github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= ++github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +@@ -319,9 +305,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ + github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +@@ -337,9 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= + github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= + github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +@@ -351,8 +335,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= + github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= ++github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +@@ -382,8 +366,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -449,8 +433,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 + github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= + github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +@@ -459,7 +443,6 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 + github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= + github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= + github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= + github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +@@ -477,8 +460,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +@@ -509,27 +493,24 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= + go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod 
h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +-go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +-go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +-go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= +-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +-go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +-go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +-go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +-go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= ++go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= ++go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= ++go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= ++go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= ++go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= ++go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= ++go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= 
++go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= ++go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= ++go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= ++go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= + go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= + go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= + go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +@@ -608,7 +589,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R + golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= + golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= + golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= + golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +@@ -620,10 +600,9 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -670,9 +649,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + 
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +@@ -689,7 +666,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= + golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= + golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +@@ -797,7 +773,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG + google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= + google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +@@ -805,13 +780,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D + google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= + google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M= +-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE= +-google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= ++google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= 
++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= ++google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= ++google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +@@ -824,12 +798,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa + google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= + google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= + google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +-google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +-google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= ++google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= ++google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= + google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +@@ -842,7 +812,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj + google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= + google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= + google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= + google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= + google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +@@ -859,7 +828,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 + gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= + gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + 
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod +index 6a5b5ef3b12..45ff820044c 100644 +--- a/staging/src/k8s.io/sample-cli-plugin/go.mod ++++ b/staging/src/k8s.io/sample-cli-plugin/go.mod +@@ -16,7 +16,7 @@ require ( + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/go-errors/errors v1.4.2 // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +@@ -24,10 +24,10 @@ require ( + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect +@@ -44,7 +44,7 @@ require ( + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum +index 2f2d42d0927..1471811fd2f 100644 +--- a/staging/src/k8s.io/sample-cli-plugin/go.sum ++++ b/staging/src/k8s.io/sample-cli-plugin/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +@@ -24,8 +25,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL + github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= + github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -60,16 +61,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +@@ -84,8 +85,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -115,8 +116,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= + github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +@@ -134,8 +135,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 + github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= ++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= + github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +@@ -165,8 +167,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod +index 5bcfb1305b2..47aa9a1e1a9 100644 +--- a/staging/src/k8s.io/sample-controller/go.mod ++++ b/staging/src/k8s.io/sample-controller/go.mod +@@ -16,7 +16,7 @@ require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect +- github.com/go-logr/logr v1.2.3 // indirect ++ github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect +@@ -24,9 +24,9 @@ require ( + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic 
v0.5.7-v3refs // indirect +- github.com/google/go-cmp v0.5.9 // indirect ++ github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect +- github.com/google/uuid v1.3.0 // indirect ++ github.com/google/uuid v1.3.1 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect +@@ -38,7 +38,7 @@ require ( + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.24.0 // indirect +- golang.org/x/oauth2 v0.7.0 // indirect ++ golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect +diff --git a/staging/src/k8s.io/sample-controller/go.sum b/staging/src/k8s.io/sample-controller/go.sum +index 262c994c52e..e62121b441f 100644 +--- a/staging/src/k8s.io/sample-controller/go.sum ++++ b/staging/src/k8s.io/sample-controller/go.sum +@@ -1,5 +1,6 @@ + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +-cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= ++cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= ++cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +@@ -19,8 +20,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH + github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= + github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= ++github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= ++github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= + github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= + github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= + github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +@@ -56,15 +57,15 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= ++github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= ++github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + 
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= ++github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= ++github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= + github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +@@ -76,8 +77,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI + github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= + github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= ++github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= ++github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -104,8 +105,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= ++github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= ++github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= + github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +@@ -116,8 +117,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV + github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= + github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= + github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
++github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= ++github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= + github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +@@ -145,8 +147,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY + golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= + golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= ++golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= ++golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go +index a5b020992b8..63955370032 100644 +--- a/vendor/cloud.google.com/go/compute/internal/version.go ++++ b/vendor/cloud.google.com/go/compute/internal/version.go +@@ -15,4 +15,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "1.19.1" ++const Version = "1.23.0" +diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml +deleted file mode 100644 +index c79105c2fbe..00000000000 +--- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml ++++ /dev/null +@@ -1,10 +0,0 @@ +-language: go +-go: +- - 1.13 +- - 1.x +- - tip +-before_install: +- - go get github.com/mattn/goveralls +- - go get golang.org/x/tools/cmd/cover +-script: +- - $HOME/gopath/bin/goveralls -service=travis-ci +diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go +index 1ce2507ebc8..b9c0c51cd75 100644 +--- a/vendor/github.com/cenkalti/backoff/v4/retry.go ++++ b/vendor/github.com/cenkalti/backoff/v4/retry.go +@@ -5,10 +5,20 @@ import ( + "time" + ) + ++// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). ++// The operation will be retried using a backoff policy if it returns an error. ++type OperationWithData[T any] func() (T, error) ++ + // An Operation is executing by Retry() or RetryNotify(). + // The operation will be retried using a backoff policy if it returns an error. + type Operation func() error + ++func (o Operation) withEmptyData() OperationWithData[struct{}] { ++ return func() (struct{}, error) { ++ return struct{}{}, o() ++ } ++} ++ + // Notify is a notify-on-error function. It receives an operation error and + // backoff delay if the operation failed (with an error). + // +@@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) + } + ++// RetryWithData is like Retry but returns data in the response too. 
++func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { ++ return RetryNotifyWithData(o, b, nil) ++} ++ + // RetryNotify calls notify function with the error and wait duration + // for each failed attempt before sleep. + func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) + } + ++// RetryNotifyWithData is like RetryNotify but returns data in the response too. ++func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { ++ return doRetryNotify(operation, b, notify, nil) ++} ++ + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer + // for each failed attempt before sleep. + // A default timer that uses system timer is used when nil is passed. + func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { +- var err error +- var next time.Duration ++ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) ++ return err ++} ++ ++// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. ++func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { ++ return doRetryNotify(operation, b, notify, t) ++} ++ ++func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { ++ var ( ++ err error ++ next time.Duration ++ res T ++ ) + if t == nil { + t = &defaultTimer{} + } +@@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer + + b.Reset() + for { +- if err = operation(); err == nil { +- return nil ++ res, err = operation() ++ if err == nil { ++ return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { +- return permanent.Err ++ return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { +- return cerr ++ return res, cerr + } + +- return err ++ return res, err + } + + if notify != nil { +@@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer + + select { + case <-ctx.Done(): +- return ctx.Err() ++ return res, ctx.Err() + case <-t.C(): + } + } +diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml +index 94ff801df1a..0cffafa7bf9 100644 +--- a/vendor/github.com/go-logr/logr/.golangci.yaml ++++ b/vendor/github.com/go-logr/logr/.golangci.yaml +@@ -6,7 +6,6 @@ linters: + disable-all: true + enable: + - asciicheck +- - deadcode + - errcheck + - forcetypeassert + - gocritic +@@ -18,10 +17,8 @@ linters: + - misspell + - revive + - staticcheck +- - structcheck + - typecheck + - unused +- - varcheck + + issues: + exclude-use-default: false +diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md +index ab593118131..a8c29bfbd53 100644 +--- a/vendor/github.com/go-logr/logr/README.md ++++ b/vendor/github.com/go-logr/logr/README.md +@@ -1,6 +1,7 @@ + # A minimal logging API for Go + + [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) ++[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) + + logr offers an(other) opinion on how Go programs and libraries can do logging + without becoming coupled to 
a particular logging implementation. This is not +@@ -73,6 +74,29 @@ received: + If the Go standard library had defined an interface for logging, this project + probably would not be needed. Alas, here we are. + ++When the Go developers started developing such an interface with ++[slog](https://github.com/golang/go/issues/56345), they adopted some of the ++logr design but also left out some parts and changed others: ++ ++| Feature | logr | slog | ++|---------|------|------| ++| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | ++| Low-level API | `LogSink` | `Handler` | ++| Stack unwinding | done by `LogSink` | done by `Logger` | ++| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | ++| Generating a value for logging on demand | `Marshaler` | `LogValuer` | ++| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | ++| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | ++| Passing logger via context | `NewContext`, `FromContext` | no API | ++| Adding a name to a logger | `WithName` | no API | ++| Modify verbosity of log entries in a call chain | `V` | no API | ++| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | ++ ++The high-level slog API is explicitly meant to be one of many different APIs ++that can be layered on top of a shared `slog.Handler`. logr is one such ++alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) ++package. ++ + ### Inspiration + + Before you consider this package, please read [this blog post by the +@@ -118,6 +142,91 @@ There are implementations for the following logging libraries: + - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) + - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + ++## slog interoperability ++ ++Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` ++and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and ++`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. ++As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level ++slog API. `slogr` itself leaves that to the caller. ++ ++## Using a `logr.Sink` as backend for slog ++ ++Ideally, a logr sink implementation should support both logr and slog by ++implementing both the normal logr interface(s) and `slogr.SlogSink`. Because ++of a conflict in the parameters of the common `Enabled` method, it is [not ++possible to implement both slog.Handler and logr.Sink in the same ++type](https://github.com/golang/go/issues/59110). ++ ++If both are supported, log calls can go from the high-level APIs to the backend ++without the need to convert parameters. `NewLogr` and `NewSlogHandler` can ++convert back and forth without adding additional wrappers, with one exception: ++when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then ++`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future ++log calls. 
++ ++Such an implementation should also support values that implement specific ++interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, ++`slog.GroupValue`). logr does not convert those. ++ ++Not supporting slog has several drawbacks: ++- Recording source code locations works correctly if the handler gets called ++ through `slog.Logger`, but may be wrong in other cases. That's because a ++ `logr.Sink` does its own stack unwinding instead of using the program counter ++ provided by the high-level API. ++- slog levels <= 0 can be mapped to logr levels by negating the level without a ++ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as ++ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink ++ because logr does not support "more important than info" levels. ++- The slog group concept is supported by prefixing each key in a key/value ++ pair with the group names, separated by a dot. For structured output like ++ JSON it would be better to group the key/value pairs inside an object. ++- Special slog values and interfaces don't work as expected. ++- The overhead is likely to be higher. ++ ++These drawbacks are severe enough that applications using a mixture of slog and ++logr should switch to a different backend. ++ ++## Using a `slog.Handler` as backend for logr ++ ++Using a plain `slog.Handler` without support for logr works better than the ++other direction: ++- All logr verbosity levels can be mapped 1:1 to their corresponding slog level ++ by negating them. ++- Stack unwinding is done by the `slogr.SlogSink` and the resulting program ++ counter is passed to the `slog.Handler`. ++- Names added via `Logger.WithName` are gathered and recorded in an additional ++ attribute with `logger` as key and the names separated by slash as value. ++- `Logger.Error` is turned into a log record with `slog.LevelError` as level ++ and an additional attribute with `err` as key, if an error was provided. ++ ++The main drawback is that `logr.Marshaler` will not be supported. Types should ++ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility ++with logr implementations without slog support is not important, then ++`slog.Valuer` is sufficient. ++ ++## Context support for slog ++ ++Storing a logger in a `context.Context` is not supported by ++slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this ++to fill this gap: ++ ++ func HandlerFromContext(ctx context.Context) slog.Handler { ++ logger, err := logr.FromContext(ctx) ++ if err == nil { ++ return slogr.NewSlogHandler(logger) ++ } ++ return slog.Default().Handler() ++ } ++ ++ func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { ++ return logr.NewContext(ctx, slogr.NewLogr(handler)) ++ } ++ ++The downside is that storing and retrieving a `slog.Handler` needs more ++allocations compared to using a `logr.Logger`. Therefore the recommendation is ++to use the `logr.Logger` API in code which uses contextual logging. ++ + ## FAQ + + ### Conceptual +@@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", + + Then gradually choose levels in between as you need them, working your way + down from 10 (for debug and trace style logs) and up from 1 (for chattier +-info-type logs.) ++info-type logs). 
For reference, slog pre-defines -4 for debug logs ++(corresponds to 4 in logr), which matches what is ++[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). + + #### How do I choose my keys? + +diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md +new file mode 100644 +index 00000000000..1ca756fc7b3 +--- /dev/null ++++ b/vendor/github.com/go-logr/logr/SECURITY.md +@@ -0,0 +1,18 @@ ++# Security Policy ++ ++If you have discovered a security vulnerability in this project, please report it ++privately. **Do not disclose it as a public issue.** This gives us time to work with you ++to fix the issue before public exposure, reducing the chance that the exploit will be ++used before a patch is released. ++ ++You may submit the report in the following ways: ++ ++- send an email to go-logr-security@googlegroups.com ++- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) ++ ++Please provide the following information in your report: ++ ++- A description of the vulnerability and its impact ++- How to reproduce the issue ++ ++We ask that you give us 90 days to work on a fix before public exposure. +diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go +index 9d92a38f1d7..99fe8be93c1 100644 +--- a/vendor/github.com/go-logr/logr/discard.go ++++ b/vendor/github.com/go-logr/logr/discard.go +@@ -20,35 +20,5 @@ package logr + // used whenever the caller is not interested in the logs. Logger instances + // produced by this function always compare as equal. + func Discard() Logger { +- return Logger{ +- level: 0, +- sink: discardLogSink{}, +- } +-} +- +-// discardLogSink is a LogSink that discards all messages. +-type discardLogSink struct{} +- +-// Verify that it actually implements the interface +-var _ LogSink = discardLogSink{} +- +-func (l discardLogSink) Init(RuntimeInfo) { +-} +- +-func (l discardLogSink) Enabled(int) bool { +- return false +-} +- +-func (l discardLogSink) Info(int, string, ...interface{}) { +-} +- +-func (l discardLogSink) Error(error, string, ...interface{}) { +-} +- +-func (l discardLogSink) WithValues(...interface{}) LogSink { +- return l +-} +- +-func (l discardLogSink) WithName(string) LogSink { +- return l ++ return New(nil) + } +diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go +index 7accdb0c400..12e5807cc5c 100644 +--- a/vendor/github.com/go-logr/logr/funcr/funcr.go ++++ b/vendor/github.com/go-logr/logr/funcr/funcr.go +@@ -21,13 +21,13 @@ limitations under the License. + // github.com/go-logr/logr.LogSink with output through an arbitrary + // "write" function. See New and NewJSON for details. + // +-// Custom LogSinks ++// # Custom LogSinks + // + // For users who need more control, a funcr.Formatter can be embedded inside + // your own custom LogSink implementation. This is useful when the LogSink + // needs to implement additional methods, for example. + // +-// Formatting ++// # Formatting + // + // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for + // values which are being logged. 
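The funcr.go changes in this hunk are mostly a mechanical `interface{}` to `any` rename plus a `json.RawMessage` special case, which makes the package's purpose hard to see from the diff alone. A small sketch of the documented entry point, assuming the `funcr.New(func(prefix, args string), Options)` constructor referenced in the package comment; `Verbosity` is part of funcr's public `Options` struct even though this hunk only touches its hook fields:

    package main

    import (
        "fmt"

        "github.com/go-logr/logr/funcr"
    )

    func main() {
        // Build a logr.Logger whose sink hands the rendered prefix and
        // key/value arguments to an arbitrary write function.
        log := funcr.New(func(prefix, args string) {
            fmt.Println(prefix, args)
        }, funcr.Options{Verbosity: 1})

        log.Info("reconcile", "object", "default/web", "attempt", 3)
        log.V(2).Info("suppressed")                // above Verbosity: 1, dropped
        log.WithName("gc").V(1).Info("sweep done") // prefix becomes "gc"
    }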
When rendering a struct, funcr will use Go's +@@ -37,6 +37,7 @@ package funcr + import ( + "bytes" + "encoding" ++ "encoding/json" + "fmt" + "path/filepath" + "reflect" +@@ -115,17 +116,17 @@ type Options struct { + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). +- RenderBuiltinsHook func(kvList []interface{}) []interface{} ++ RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. +- RenderValuesHook func(kvList []interface{}) []interface{} ++ RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. +- RenderArgsHook func(kvList []interface{}) []interface{} ++ RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, +@@ -162,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { + return &l + } + +-func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { ++func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l + } +@@ -172,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + return &l + } + +-func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { ++func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) + } + +-func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { ++func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) + } +@@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { + prefix: "", + values: nil, + depth: 0, +- opts: opts, ++ opts: &opts, + } + return f + } +@@ -228,10 +229,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { + type Formatter struct { + outputFormat outputFormat + prefix string +- values []interface{} ++ values []any + valuesStr string + depth int +- opts Options ++ opts *Options + } + + // outputFormat indicates which outputFormat to use. +@@ -245,10 +246,10 @@ const ( + ) + + // PseudoStruct is a list of key-value pairs that gets logged as a struct. +-type PseudoStruct []interface{} ++type PseudoStruct []any + + // render produces a log line, ready to use. +-func (f Formatter) render(builtins, args []interface{}) string { ++func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { +@@ -291,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { + // This function returns a potentially modified version of kvList, which + // ensures that there is a value for every key (adding a value if needed) and + // that each key is a string (substituting a key if needed). 
+-func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { ++func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { +@@ -333,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b + return kvList + } + +-func (f Formatter) pretty(value interface{}) string { ++func (f Formatter) pretty(value any) string { + return f.prettyWithFlags(value, 0, 0) + } + +@@ -342,7 +343,7 @@ const ( + ) + + // TODO: This is not fast. Most of the overhead goes here. +-func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { ++func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `""` + } +@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } ++ printComma := false // testing i>0 is not enough because of JSON omitted fields + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { +@@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s + if omitempty && isEmpty(v.Field(i)) { + continue + } +- if i > 0 { ++ if printComma { + buf.WriteByte(',') + } ++ printComma = true // if we got here, we are rendering a field + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue +@@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s + } + return buf.String() + case reflect.Slice, reflect.Array: ++ // If this is outputing as JSON make sure this isn't really a json.RawMessage. ++ // If so just emit "as-is" and don't pretty it as that will just print ++ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. ++ if f.outputFormat == outputJSON { ++ if rm, ok := value.(json.RawMessage); ok { ++ // If it's empty make sure we emit an empty value as the array style would below. ++ if len(rm) > 0 { ++ buf.Write(rm) ++ } else { ++ buf.WriteString("null") ++ } ++ return buf.String() ++ } ++ } + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { +@@ -597,7 +614,7 @@ func isEmpty(v reflect.Value) bool { + return false + } + +-func invokeMarshaler(m logr.Marshaler) (ret interface{}) { ++func invokeMarshaler(m logr.Marshaler) (ret any) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) +@@ -658,12 +675,12 @@ func (f Formatter) caller() Caller { + + const noValue = "" + +-func (f Formatter) nonStringKey(v interface{}) string { ++func (f Formatter) nonStringKey(v any) string { + return fmt.Sprintf("", f.snippet(v)) + } + + // snippet produces a short snippet string of an arbitrary value. +-func (f Formatter) snippet(v interface{}) string { ++func (f Formatter) snippet(v any) string { + const snipLen = 16 + + snip := f.pretty(v) +@@ -676,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string { + // sanitize ensures that a list of key-value pairs has a value for every key + // (adding a value if needed) and that each key is a string (substituting a key + // if needed). 
+-func (f Formatter) sanitize(kvList []interface{}) []interface{} { ++func (f Formatter) sanitize(kvList []any) []any { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } +@@ -710,8 +727,8 @@ func (f Formatter) GetDepth() int { + // FormatInfo renders an Info log message into strings. The prefix will be + // empty when no names were set (via AddNames), or when the output is + // configured for JSON. +-func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { +- args := make([]interface{}, 0, 64) // using a constant here impacts perf ++func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { ++ args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) +@@ -728,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref + } + + // FormatError renders an Error log message into strings. The prefix will be +-// empty when no names were set (via AddNames), or when the output is ++// empty when no names were set (via AddNames), or when the output is + // configured for JSON. +-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { +- args := make([]interface{}, 0, 64) // using a constant here impacts perf ++func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { ++ args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) +@@ -744,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) +- var loggableErr interface{} ++ var loggableErr any + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) +- return f.prefix, f.render(args, kvList) ++ return prefix, f.render(args, kvList) + } + + // AddName appends the specified name. funcr uses '/' characters to separate +@@ -764,7 +781,7 @@ func (f *Formatter) AddName(name string) { + + // AddValues adds key-value pairs to the set of saved values to be logged with + // each log line. +-func (f *Formatter) AddValues(kvList []interface{}) { ++func (f *Formatter) AddValues(kvList []any) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) +diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go +index c3b56b3d2c5..2a5075a180f 100644 +--- a/vendor/github.com/go-logr/logr/logr.go ++++ b/vendor/github.com/go-logr/logr/logr.go +@@ -21,7 +21,7 @@ limitations under the License. + // to back that API. Packages in the Go ecosystem can depend on this package, + // while callers can implement logging with whatever backend is appropriate. + // +-// Usage ++// # Usage + // + // Logging is done using a Logger instance. Logger is a concrete type with + // methods, which defers the actual logging to a LogSink interface. The main +@@ -30,16 +30,20 @@ limitations under the License. + // "structured logging". 
+ // + // With Go's standard log package, we might write: +-// log.Printf("setting target value %s", targetValue) ++// ++// log.Printf("setting target value %s", targetValue) + // + // With logr's structured logging, we'd write: +-// logger.Info("setting target", "value", targetValue) ++// ++// logger.Info("setting target", "value", targetValue) + // + // Errors are much the same. Instead of: +-// log.Printf("failed to open the pod bay door for user %s: %v", user, err) ++// ++// log.Printf("failed to open the pod bay door for user %s: %v", user, err) + // + // We'd write: +-// logger.Error(err, "failed to open the pod bay door", "user", user) ++// ++// logger.Error(err, "failed to open the pod bay door", "user", user) + // + // Info() and Error() are very similar, but they are separate methods so that + // LogSink implementations can choose to do things like attach additional +@@ -47,7 +51,7 @@ limitations under the License. + // always logged, regardless of the current verbosity. If there is no error + // instance available, passing nil is valid. + // +-// Verbosity ++// # Verbosity + // + // Often we want to log information only when the application in "verbose + // mode". To write log lines that are more verbose, Logger has a V() method. +@@ -58,20 +62,22 @@ limitations under the License. + // Error messages do not have a verbosity level and are always logged. + // + // Where we might have written: +-// if flVerbose >= 2 { +-// log.Printf("an unusual thing happened") +-// } ++// ++// if flVerbose >= 2 { ++// log.Printf("an unusual thing happened") ++// } + // + // We can write: +-// logger.V(2).Info("an unusual thing happened") + // +-// Logger Names ++// logger.V(2).Info("an unusual thing happened") ++// ++// # Logger Names + // + // Logger instances can have name strings so that all messages logged through + // that instance have additional context. For example, you might want to add + // a subsystem name: + // +-// logger.WithName("compactor").Info("started", "time", time.Now()) ++// logger.WithName("compactor").Info("started", "time", time.Now()) + // + // The WithName() method returns a new Logger, which can be passed to + // constructors or other functions for further use. Repeated use of WithName() +@@ -82,25 +88,27 @@ limitations under the License. + // joining operation (e.g. whitespace, commas, periods, slashes, brackets, + // quotes, etc). + // +-// Saved Values ++// # Saved Values + // + // Logger instances can store any number of key/value pairs, which will be + // logged alongside all messages logged through that instance. For example, + // you might want to create a Logger instance per managed object: + // + // With the standard log package, we might write: +-// log.Printf("decided to set field foo to value %q for object %s/%s", +-// targetValue, object.Namespace, object.Name) ++// ++// log.Printf("decided to set field foo to value %q for object %s/%s", ++// targetValue, object.Namespace, object.Name) + // + // With logr we'd write: +-// // Elsewhere: set up the logger to log the object name. +-// obj.logger = mainLogger.WithValues( +-// "name", obj.name, "namespace", obj.namespace) + // +-// // later on... +-// obj.logger.Info("setting foo", "value", targetValue) ++// // Elsewhere: set up the logger to log the object name. ++// obj.logger = mainLogger.WithValues( ++// "name", obj.name, "namespace", obj.namespace) ++// ++// // later on... 
++// obj.logger.Info("setting foo", "value", targetValue) + // +-// Best Practices ++// # Best Practices + // + // Logger has very few hard rules, with the goal that LogSink implementations + // might have a lot of freedom to differentiate. There are, however, some +@@ -119,20 +127,20 @@ limitations under the License. + // such a value can call its methods without having to check whether the + // instance is ready for use. + // +-// Calling methods with the null logger (Logger{}) as instance will crash +-// because it has no LogSink. Therefore this null logger should never be passed +-// around. For cases where passing a logger is optional, a pointer to Logger ++// The zero logger (= Logger{}) is identical to Discard() and discards all log ++// entries. Code that receives a Logger by value can simply call it, the methods ++// will never crash. For cases where passing a logger is optional, a pointer to Logger + // should be used. + // +-// Key Naming Conventions ++// # Key Naming Conventions + // + // Keys are not strictly required to conform to any specification or regex, but + // it is recommended that they: +-// * be human-readable and meaningful (not auto-generated or simple ordinals) +-// * be constant (not dependent on input data) +-// * contain only printable characters +-// * not contain whitespace or punctuation +-// * use lower case for simple keys and lowerCamelCase for more complex ones ++// - be human-readable and meaningful (not auto-generated or simple ordinals) ++// - be constant (not dependent on input data) ++// - contain only printable characters ++// - not contain whitespace or punctuation ++// - use lower case for simple keys and lowerCamelCase for more complex ones + // + // These guidelines help ensure that log data is processed properly regardless + // of the log implementation. For example, log implementations will try to +@@ -141,51 +149,54 @@ limitations under the License. + // While users are generally free to use key names of their choice, it's + // generally best to avoid using the following keys, as they're frequently used + // by implementations: +-// * "caller": the calling information (file/line) of a particular log line +-// * "error": the underlying error value in the `Error` method +-// * "level": the log level +-// * "logger": the name of the associated logger +-// * "msg": the log message +-// * "stacktrace": the stack trace associated with a particular log line or +-// error (often from the `Error` message) +-// * "ts": the timestamp for a log line ++// - "caller": the calling information (file/line) of a particular log line ++// - "error": the underlying error value in the `Error` method ++// - "level": the log level ++// - "logger": the name of the associated logger ++// - "msg": the log message ++// - "stacktrace": the stack trace associated with a particular log line or ++// error (often from the `Error` message) ++// - "ts": the timestamp for a log line + // + // Implementations are encouraged to make use of these keys to represent the + // above concepts, when necessary (for example, in a pure-JSON output form, it + // would be necessary to represent at least message and timestamp as ordinary + // named values). + // +-// Break Glass ++// # Break Glass + // + // Implementations may choose to give callers access to the underlying + // logging implementation. The recommended pattern for this is: +-// // Underlier exposes access to the underlying logging implementation. 
+-// // Since callers only have a logr.Logger, they have to know which +-// // implementation is in use, so this interface is less of an abstraction +-// // and more of way to test type conversion. +-// type Underlier interface { +-// GetUnderlying() +-// } ++// ++// // Underlier exposes access to the underlying logging implementation. ++// // Since callers only have a logr.Logger, they have to know which ++// // implementation is in use, so this interface is less of an abstraction ++// // and more of way to test type conversion. ++// type Underlier interface { ++// GetUnderlying() ++// } + // + // Logger grants access to the sink to enable type assertions like this: +-// func DoSomethingWithImpl(log logr.Logger) { +-// if underlier, ok := log.GetSink()(impl.Underlier) { +-// implLogger := underlier.GetUnderlying() +-// ... +-// } +-// } ++// ++// func DoSomethingWithImpl(log logr.Logger) { ++// if underlier, ok := log.GetSink().(impl.Underlier); ok { ++// implLogger := underlier.GetUnderlying() ++// ... ++// } ++// } + // + // Custom `With*` functions can be implemented by copying the complete + // Logger struct and replacing the sink in the copy: +-// // WithFooBar changes the foobar parameter in the log sink and returns a +-// // new logger with that modified sink. It does nothing for loggers where +-// // the sink doesn't support that parameter. +-// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +-// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +-// } +-// return log +-// } ++// ++// // WithFooBar changes the foobar parameter in the log sink and returns a ++// // new logger with that modified sink. It does nothing for loggers where ++// // the sink doesn't support that parameter. ++// func WithFoobar(log logr.Logger, foobar int) logr.Logger { ++// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { ++// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) ++// } ++// return log ++// } + // + // Don't use New to construct a new Logger with a LogSink retrieved from an + // existing Logger. Source code attribution might not work correctly and +@@ -201,11 +212,14 @@ import ( + ) + + // New returns a new Logger instance. This is primarily used by libraries +-// implementing LogSink, rather than end users. ++// implementing LogSink, rather than end users. Passing a nil sink will create ++// a Logger which discards all log lines. + func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) +- sink.Init(runtimeInfo) ++ if sink != nil { ++ sink.Init(runtimeInfo) ++ } + return logger + } + +@@ -244,7 +258,13 @@ type Logger struct { + // Enabled tests whether this Logger is enabled. For example, commandline + // flags might be used to set the logging verbosity and disable some info logs. + func (l Logger) Enabled() bool { +- return l.sink.Enabled(l.level) ++ // Some implementations of LogSink look at the caller in Enabled (e.g. ++ // different verbosity levels per package or file), but we only pass one ++ // CallDepth in (via Init). This means that all calls from Logger to the ++ // LogSink's Enabled, Info, and Error methods must have the same number of ++ // frames. In other words, Logger methods can't call other Logger methods ++ // which call these LogSink methods unless we do it the same in all paths. ++ return l.sink != nil && l.sink.Enabled(l.level) + } + + // Info logs a non-error message with the given key/value pairs as context. 
+@@ -253,8 +273,11 @@ func (l Logger) Enabled() bool { + // line. The key/value pairs can then be used to add additional variable + // information. The key/value pairs must alternate string keys and arbitrary + // values. +-func (l Logger) Info(msg string, keysAndValues ...interface{}) { +- if l.Enabled() { ++func (l Logger) Info(msg string, keysAndValues ...any) { ++ if l.sink == nil { ++ return ++ } ++ if l.sink.Enabled(l.level) { // see comment in Enabled + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } +@@ -272,7 +295,10 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { + // while the err argument should be used to attach the actual error that + // triggered this log line, if present. The err parameter is optional + // and nil may be passed instead of an error instance. +-func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { ++func (l Logger) Error(err error, msg string, keysAndValues ...any) { ++ if l.sink == nil { ++ return ++ } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } +@@ -284,6 +310,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + // level means a log message is less important. Negative V-levels are treated + // as 0. + func (l Logger) V(level int) Logger { ++ if l.sink == nil { ++ return l ++ } + if level < 0 { + level = 0 + } +@@ -291,9 +320,19 @@ func (l Logger) V(level int) Logger { + return l + } + ++// GetV returns the verbosity level of the logger. If the logger's LogSink is ++// nil as in the Discard logger, this will always return 0. ++func (l Logger) GetV() int { ++ // 0 if l.sink nil because of the if check in V above. ++ return l.level ++} ++ + // WithValues returns a new Logger instance with additional key/value pairs. + // See Info for documentation on how key/value pairs work. +-func (l Logger) WithValues(keysAndValues ...interface{}) Logger { ++func (l Logger) WithValues(keysAndValues ...any) Logger { ++ if l.sink == nil { ++ return l ++ } + l.setSink(l.sink.WithValues(keysAndValues...)) + return l + } +@@ -304,6 +343,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + // contain only letters, digits, and hyphens (see the package documentation for + // more information). + func (l Logger) WithName(name string) Logger { ++ if l.sink == nil { ++ return l ++ } + l.setSink(l.sink.WithName(name)) + return l + } +@@ -324,6 +366,9 @@ func (l Logger) WithName(name string) Logger { + // WithCallDepth(1) because it works with implementions that support the + // CallDepthLogSink and/or CallStackHelperLogSink interfaces. + func (l Logger) WithCallDepth(depth int) Logger { ++ if l.sink == nil { ++ return l ++ } + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } +@@ -345,6 +390,9 @@ func (l Logger) WithCallDepth(depth int) Logger { + // implementation does not support either of these, the original Logger will be + // returned. 
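The nil-sink guards added above (together with the `IsZero` helper a few hunks further down) make the zero `logr.Logger` behave like `Discard()`: every method becomes a no-op instead of a panic. A short sketch of what that buys callers with optional loggers; the `doWork` helper is purely illustrative:

    package main

    import "github.com/go-logr/logr"

    func doWork(log logr.Logger) {
        // Safe even when the caller passed logr.Logger{}: with a nil
        // sink, Enabled, Info, Error, V, WithValues and WithName are
        // all no-ops after this change.
        log = log.WithName("worker").WithValues("job", 42)
        log.V(1).Info("starting")
        log.Error(nil, "nothing went wrong, just demonstrating Error")
    }

    func main() {
        doWork(logr.Logger{})  // zero value: nothing is printed
        doWork(logr.Discard()) // equivalent, but explicit
    }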
+ func (l Logger) WithCallStackHelper() (func(), Logger) { ++ if l.sink == nil { ++ return func() {}, l ++ } + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) +@@ -357,6 +405,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { + return helper, l + } + ++// IsZero returns true if this logger is an uninitialized zero value ++func (l Logger) IsZero() bool { ++ return l.sink == nil ++} ++ + // contextKey is how we find Loggers in a context.Context. + type contextKey struct{} + +@@ -427,22 +480,22 @@ type LogSink interface { + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. +- Info(level int, msg string, keysAndValues ...interface{}) ++ Info(level int, msg string, keysAndValues ...any) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. +- Error(err error, msg string, keysAndValues ...interface{}) ++ Error(err error, msg string, keysAndValues ...any) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. +- WithValues(keysAndValues ...interface{}) LogSink ++ WithValues(keysAndValues ...any) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink + } + +-// CallDepthLogSink represents a Logger that knows how to climb the call stack ++// CallDepthLogSink represents a LogSink that knows how to climb the call stack + // to identify the original call site and can offset the depth by a specified + // number of frames. This is useful for users who have helper functions + // between the "real" call site and the actual calls to Logger methods. +@@ -467,7 +520,7 @@ type CallDepthLogSink interface { + WithCallDepth(depth int) LogSink + } + +-// CallStackHelperLogSink represents a Logger that knows how to climb ++// CallStackHelperLogSink represents a LogSink that knows how to climb + // the call stack to identify the original call site and can skip + // intermediate helper functions if they mark themselves as + // helper. Go's testing package uses that approach. +@@ -506,5 +559,5 @@ type Marshaler interface { + // with exported fields + // + // It may return any value of any type. +- MarshalLog() interface{} ++ MarshalLog() any + } +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +index e54a76c7e3c..3d8d0cd3ae3 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +@@ -7,6 +7,7 @@ package cmpopts + + import ( + "errors" ++ "fmt" + "math" + "reflect" + "time" +@@ -16,10 +17,10 @@ import ( + + func equateAlways(_, _ interface{}) bool { return true } + +-// EquateEmpty returns a Comparer option that determines all maps and slices ++// EquateEmpty returns a [cmp.Comparer] option that determines all maps and slices + // with a length of zero to be equal, regardless of whether they are nil. + // +-// EquateEmpty can be used in conjunction with SortSlices and SortMaps. ++// EquateEmpty can be used in conjunction with [SortSlices] and [SortMaps]. 
+ func EquateEmpty() cmp.Option { + return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) + } +@@ -31,7 +32,7 @@ func isEmpty(x, y interface{}) bool { + (vx.Len() == 0 && vy.Len() == 0) + } + +-// EquateApprox returns a Comparer option that determines float32 or float64 ++// EquateApprox returns a [cmp.Comparer] option that determines float32 or float64 + // values to be equal if they are within a relative fraction or absolute margin. + // This option is not used when either x or y is NaN or infinite. + // +@@ -45,7 +46,7 @@ func isEmpty(x, y interface{}) bool { + // + // |x-y| ≤ max(fraction*min(|x|, |y|), margin) + // +-// EquateApprox can be used in conjunction with EquateNaNs. ++// EquateApprox can be used in conjunction with [EquateNaNs]. + func EquateApprox(fraction, margin float64) cmp.Option { + if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { + panic("margin or fraction must be a non-negative number") +@@ -73,10 +74,10 @@ func (a approximator) compareF32(x, y float32) bool { + return a.compareF64(float64(x), float64(y)) + } + +-// EquateNaNs returns a Comparer option that determines float32 and float64 ++// EquateNaNs returns a [cmp.Comparer] option that determines float32 and float64 + // NaN values to be equal. + // +-// EquateNaNs can be used in conjunction with EquateApprox. ++// EquateNaNs can be used in conjunction with [EquateApprox]. + func EquateNaNs() cmp.Option { + return cmp.Options{ + cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), +@@ -91,8 +92,8 @@ func areNaNsF32s(x, y float32) bool { + return areNaNsF64s(float64(x), float64(y)) + } + +-// EquateApproxTime returns a Comparer option that determines two non-zero +-// time.Time values to be equal if they are within some margin of one another. ++// EquateApproxTime returns a [cmp.Comparer] option that determines two non-zero ++// [time.Time] values to be equal if they are within some margin of one another. + // If both times have a monotonic clock reading, then the monotonic time + // difference will be used. The margin must be non-negative. + func EquateApproxTime(margin time.Duration) cmp.Option { +@@ -131,8 +132,8 @@ type anyError struct{} + func (anyError) Error() string { return "any error" } + func (anyError) Is(err error) bool { return err != nil } + +-// EquateErrors returns a Comparer option that determines errors to be equal +-// if errors.Is reports them to match. The AnyError error can be used to ++// EquateErrors returns a [cmp.Comparer] option that determines errors to be equal ++// if [errors.Is] reports them to match. The [AnyError] error can be used to + // match any non-nil error. + func EquateErrors() cmp.Option { + return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors)) +@@ -154,3 +155,31 @@ func compareErrors(x, y interface{}) bool { + ye := y.(error) + return errors.Is(xe, ye) || errors.Is(ye, xe) + } ++ ++// EquateComparable returns a [cmp.Option] that determines equality ++// of comparable types by directly comparing them using the == operator in Go. ++// The types to compare are specified by passing a value of that type. ++// This option should only be used on types that are documented as being ++// safe for direct == comparison. For example, [net/netip.Addr] is documented ++// as being semantically safe to use with ==, while [time.Time] is documented ++// to discourage the use of == on time values. 
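`EquateComparable` is new in this go-cmp bump, and its doc comment above points at `net/netip.Addr` as the motivating case. A minimal sketch, assuming Go 1.18+ for `net/netip`; the `endpoint` struct is illustrative:

    package main

    import (
        "fmt"
        "net/netip"

        "github.com/google/go-cmp/cmp"
        "github.com/google/go-cmp/cmp/cmpopts"
    )

    type endpoint struct {
        Name string
        Addr netip.Addr // comparable, but built from unexported fields
    }

    func main() {
        x := endpoint{Name: "a", Addr: netip.MustParseAddr("10.0.0.1")}
        y := endpoint{Name: "a", Addr: netip.MustParseAddr("10.0.0.2")}

        // Without the option, cmp panics on netip.Addr's unexported
        // fields (and, after this bump, suggests EquateComparable in
        // that panic). With it, Addr values are compared using ==.
        fmt.Println(cmp.Diff(x, y, cmpopts.EquateComparable(netip.Addr{})))
    }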
++func EquateComparable(typs ...interface{}) cmp.Option { ++ types := make(typesFilter) ++ for _, typ := range typs { ++ switch t := reflect.TypeOf(typ); { ++ case !t.Comparable(): ++ panic(fmt.Sprintf("%T is not a comparable Go type", typ)) ++ case types[t]: ++ panic(fmt.Sprintf("%T is already specified", typ)) ++ default: ++ types[t] = true ++ } ++ } ++ return cmp.FilterPath(types.filter, cmp.Comparer(equateAny)) ++} ++ ++type typesFilter map[reflect.Type]bool ++ ++func (tf typesFilter) filter(p cmp.Path) bool { return tf[p.Last().Type()] } ++ ++func equateAny(x, y interface{}) bool { return x == y } +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +index 80c60617e40..fb84d11d70e 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +@@ -14,7 +14,7 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// IgnoreFields returns an Option that ignores fields of the ++// IgnoreFields returns an [cmp.Option] that ignores fields of the + // given names on a single struct type. It respects the names of exported fields + // that are forwarded due to struct embedding. + // The struct type is specified by passing in a value of that type. +@@ -26,7 +26,7 @@ func IgnoreFields(typ interface{}, names ...string) cmp.Option { + return cmp.FilterPath(sf.filter, cmp.Ignore()) + } + +-// IgnoreTypes returns an Option that ignores all values assignable to ++// IgnoreTypes returns an [cmp.Option] that ignores all values assignable to + // certain types, which are specified by passing in a value of each type. + func IgnoreTypes(typs ...interface{}) cmp.Option { + tf := newTypeFilter(typs...) +@@ -59,10 +59,10 @@ func (tf typeFilter) filter(p cmp.Path) bool { + return false + } + +-// IgnoreInterfaces returns an Option that ignores all values or references of ++// IgnoreInterfaces returns an [cmp.Option] that ignores all values or references of + // values assignable to certain interface types. These interfaces are specified + // by passing in an anonymous struct with the interface types embedded in it. +-// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. ++// For example, to ignore [sync.Locker], pass in struct{sync.Locker}{}. + func IgnoreInterfaces(ifaces interface{}) cmp.Option { + tf := newIfaceFilter(ifaces) + return cmp.FilterPath(tf.filter, cmp.Ignore()) +@@ -107,7 +107,7 @@ func (tf ifaceFilter) filter(p cmp.Path) bool { + return false + } + +-// IgnoreUnexported returns an Option that only ignores the immediate unexported ++// IgnoreUnexported returns an [cmp.Option] that only ignores the immediate unexported + // fields of a struct, including anonymous fields of unexported types. + // In particular, unexported fields within the struct's exported fields + // of struct types, including anonymous fields, will not be ignored unless the +@@ -115,7 +115,7 @@ func (tf ifaceFilter) filter(p cmp.Path) bool { + // + // Avoid ignoring unexported fields of a type which you do not control (i.e. a + // type from another repository), as changes to the implementation of such types +-// may change how the comparison behaves. Prefer a custom Comparer instead. ++// may change how the comparison behaves. Prefer a custom [cmp.Comparer] instead. + func IgnoreUnexported(typs ...interface{}) cmp.Option { + ux := newUnexportedFilter(typs...) 
+ return cmp.FilterPath(ux.filter, cmp.Ignore()) +@@ -148,7 +148,7 @@ func isExported(id string) bool { + return unicode.IsUpper(r) + } + +-// IgnoreSliceElements returns an Option that ignores elements of []V. ++// IgnoreSliceElements returns an [cmp.Option] that ignores elements of []V. + // The discard function must be of the form "func(T) bool" which is used to + // ignore slice elements of type V, where V is assignable to T. + // Elements are ignored if the function reports true. +@@ -176,7 +176,7 @@ func IgnoreSliceElements(discardFunc interface{}) cmp.Option { + }, cmp.Ignore()) + } + +-// IgnoreMapEntries returns an Option that ignores entries of map[K]V. ++// IgnoreMapEntries returns an [cmp.Option] that ignores entries of map[K]V. + // The discard function must be of the form "func(T, R) bool" which is used to + // ignore map entries of type K and V, where K and V are assignable to T and R. + // Entries are ignored if the function reports true. +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +index 0eb2a758c23..c6d09dae402 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +@@ -13,7 +13,7 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// SortSlices returns a Transformer option that sorts all []V. ++// SortSlices returns a [cmp.Transformer] option that sorts all []V. + // The less function must be of the form "func(T, T) bool" which is used to + // sort any slice with element type V that is assignable to T. + // +@@ -25,7 +25,7 @@ import ( + // The less function does not have to be "total". That is, if !less(x, y) and + // !less(y, x) for two elements x and y, their relative order is maintained. + // +-// SortSlices can be used in conjunction with EquateEmpty. ++// SortSlices can be used in conjunction with [EquateEmpty]. + func SortSlices(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { +@@ -82,13 +82,13 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + } + +-// SortMaps returns a Transformer option that flattens map[K]V types to be a ++// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be a + // sorted []struct{K, V}. The less function must be of the form + // "func(T, T) bool" which is used to sort any map with key K that is + // assignable to T. + // +-// Flattening the map into a slice has the property that cmp.Equal is able to +-// use Comparers on K or the K.Equal method if it exists. ++// Flattening the map into a slice has the property that [cmp.Equal] is able to ++// use [cmp.Comparer] options on K or the K.Equal method if it exists. + // + // The less function must be: + // - Deterministic: less(x, y) == less(x, y) +@@ -96,7 +96,7 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool { + // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) + // - Total: if x != y, then either less(x, y) or less(y, x) + // +-// SortMaps can be used in conjunction with EquateEmpty. ++// SortMaps can be used in conjunction with [EquateEmpty]. 
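The `SortSlices` and `SortMaps` doc comments above both note that they compose with `EquateEmpty`. A small sketch of that combination; the values are illustrative:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
        "github.com/google/go-cmp/cmp/cmpopts"
    )

    func main() {
        got := map[string][]string{"a": {"y", "x"}, "b": nil}
        want := map[string][]string{"a": {"x", "y"}, "b": {}}

        equal := cmp.Equal(got, want,
            // Order-insensitive slice comparison.
            cmpopts.SortSlices(func(a, b string) bool { return a < b }),
            // Treat nil and empty slices as equal.
            cmpopts.EquateEmpty(),
        )
        fmt.Println(equal) // true
    }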
+ func SortMaps(lessFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessFunc) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { +diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +index 8812443a2f5..25b4bd05bd7 100644 +--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go ++++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +@@ -19,7 +19,7 @@ func (xf xformFilter) filter(p cmp.Path) bool { + return true + } + +-// AcyclicTransformer returns a Transformer with a filter applied that ensures ++// AcyclicTransformer returns a [cmp.Transformer] with a filter applied that ensures + // that the transformer cannot be recursively applied upon its own output. + // + // An example use case is a transformer that splits a string by lines: +@@ -28,7 +28,7 @@ func (xf xformFilter) filter(p cmp.Path) bool { + // return strings.Split(s, "\n") + // }) + // +-// Had this been an unfiltered Transformer instead, this would result in an ++// Had this been an unfiltered [cmp.Transformer] instead, this would result in an + // infinite cycle converting a string to []string to [][]string and so on. + func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { + xf := xformFilter{cmp.Transformer(name, xformFunc)} +diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go +index 087320da7f0..0f5b8a48c6b 100644 +--- a/vendor/github.com/google/go-cmp/cmp/compare.go ++++ b/vendor/github.com/google/go-cmp/cmp/compare.go +@@ -5,7 +5,7 @@ + // Package cmp determines equality of values. + // + // This package is intended to be a more powerful and safer alternative to +-// reflect.DeepEqual for comparing whether two values are semantically equal. ++// [reflect.DeepEqual] for comparing whether two values are semantically equal. + // It is intended to only be used in tests, as performance is not a goal and + // it may panic if it cannot compare the values. Its propensity towards + // panicking means that its unsuitable for production environments where a +@@ -18,16 +18,17 @@ + // For example, an equality function may report floats as equal so long as + // they are within some tolerance of each other. + // +-// - Types with an Equal method may use that method to determine equality. +-// This allows package authors to determine the equality operation +-// for the types that they define. ++// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method ++// to determine equality. This allows package authors to determine ++// the equality operation for the types that they define. + // + // - If no custom equality functions are used and no Equal method is defined, + // equality is determined by recursively comparing the primitive kinds on +-// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, ++// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], + // unexported fields are not compared by default; they result in panics +-// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +-// or explicitly compared using the Exporter option. ++// unless suppressed by using an [Ignore] option ++// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ++// or explicitly compared using the [Exporter] option. 
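The package documentation reworded above leans on the "Equal method" rule: types such as `time.Time` are compared with their own `Equal` method rather than field by field, which is a key difference from `reflect.DeepEqual`. A tiny sketch of why that matters, using only the standard library:

    package main

    import (
        "fmt"
        "time"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        t1 := time.Date(2024, 1, 1, 10, 0, 0, 0, time.UTC)
        t2 := t1.In(time.FixedZone("UTC+2", 2*60*60)) // same instant, different location

        // reflect.DeepEqual(t1, t2) reports false because the internal
        // representations differ; cmp.Equal uses time.Time.Equal and
        // reports true.
        fmt.Println(cmp.Equal(t1, t2))      // true
        fmt.Println(cmp.Diff(t1, t2) == "") // true: no diff to print
    }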
+ package cmp + + import ( +@@ -45,14 +46,14 @@ import ( + // Equal reports whether x and y are equal by recursively applying the + // following rules in the given order to x and y and all of their sub-values: + // +-// - Let S be the set of all Ignore, Transformer, and Comparer options that ++// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that + // remain after applying all path filters, value filters, and type filters. +-// If at least one Ignore exists in S, then the comparison is ignored. +-// If the number of Transformer and Comparer options in S is non-zero, ++// If at least one [Ignore] exists in S, then the comparison is ignored. ++// If the number of [Transformer] and [Comparer] options in S is non-zero, + // then Equal panics because it is ambiguous which option to use. +-// If S contains a single Transformer, then use that to transform ++// If S contains a single [Transformer], then use that to transform + // the current values and recursively call Equal on the output values. +-// If S contains a single Comparer, then use that to compare the current values. ++// If S contains a single [Comparer], then use that to compare the current values. + // Otherwise, evaluation proceeds to the next rule. + // + // - If the values have an Equal method of the form "(T) Equal(T) bool" or +@@ -66,21 +67,22 @@ import ( + // Functions are only equal if they are both nil, otherwise they are unequal. + // + // Structs are equal if recursively calling Equal on all fields report equal. +-// If a struct contains unexported fields, Equal panics unless an Ignore option +-// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +-// explicitly permits comparing the unexported field. ++// If a struct contains unexported fields, Equal panics unless an [Ignore] option ++// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field ++// or the [Exporter] option explicitly permits comparing the unexported field. + // + // Slices are equal if they are both nil or both non-nil, where recursively + // calling Equal on all non-ignored slice or array elements report equal. + // Empty non-nil slices and nil slices are not equal; to equate empty slices, +-// consider using cmpopts.EquateEmpty. ++// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. + // + // Maps are equal if they are both nil or both non-nil, where recursively + // calling Equal on all non-ignored map entries report equal. + // Map keys are equal according to the == operator. +-// To use custom comparisons for map keys, consider using cmpopts.SortMaps. ++// To use custom comparisons for map keys, consider using ++// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. + // Empty non-nil maps and nil maps are not equal; to equate empty maps, +-// consider using cmpopts.EquateEmpty. ++// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. 
+ // + // Pointers and interfaces are equal if they are both nil or both non-nil, + // where they have the same underlying concrete type and recursively +diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export.go +similarity index 94% +rename from vendor/github.com/google/go-cmp/cmp/export_unsafe.go +rename to vendor/github.com/google/go-cmp/cmp/export.go +index e2c0f74e839..29f82fe6b2f 100644 +--- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go ++++ b/vendor/github.com/google/go-cmp/cmp/export.go +@@ -2,9 +2,6 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !purego +-// +build !purego +- + package cmp + + import ( +@@ -12,8 +9,6 @@ import ( + "unsafe" + ) + +-const supportExporters = true +- + // retrieveUnexportedField uses unsafe to forcibly retrieve any field from + // a struct such that the value has read-write permissions. + // +diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go +deleted file mode 100644 +index ae851fe53f2..00000000000 +--- a/vendor/github.com/google/go-cmp/cmp/export_panic.go ++++ /dev/null +@@ -1,16 +0,0 @@ +-// Copyright 2017, The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build purego +-// +build purego +- +-package cmp +- +-import "reflect" +- +-const supportExporters = false +- +-func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { +- panic("no support for forcibly accessing unexported fields") +-} +diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +similarity index 95% +rename from vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +rename to vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +index 16e6860af6e..e5dfff69afa 100644 +--- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go ++++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go +@@ -2,9 +2,6 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !purego +-// +build !purego +- + package value + + import ( +diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +deleted file mode 100644 +index 1a71bfcbd39..00000000000 +--- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go ++++ /dev/null +@@ -1,34 +0,0 @@ +-// Copyright 2018, The Go Authors. All rights reserved. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-//go:build purego +-// +build purego +- +-package value +- +-import "reflect" +- +-// Pointer is an opaque typed pointer and is guaranteed to be comparable. +-type Pointer struct { +- p uintptr +- t reflect.Type +-} +- +-// PointerOf returns a Pointer from v, which must be a +-// reflect.Ptr, reflect.Slice, or reflect.Map. +-func PointerOf(v reflect.Value) Pointer { +- // NOTE: Storing a pointer as an uintptr is technically incorrect as it +- // assumes that the GC implementation does not use a moving collector. +- return Pointer{v.Pointer(), v.Type()} +-} +- +-// IsNil reports whether the pointer is nil. 
+-func (p Pointer) IsNil() bool { +- return p.p == 0 +-} +- +-// Uintptr returns the pointer as a uintptr. +-func (p Pointer) Uintptr() uintptr { +- return p.p +-} +diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go +index 1f9ca9c4892..754496f3b3f 100644 +--- a/vendor/github.com/google/go-cmp/cmp/options.go ++++ b/vendor/github.com/google/go-cmp/cmp/options.go +@@ -13,15 +13,15 @@ import ( + "github.com/google/go-cmp/cmp/internal/function" + ) + +-// Option configures for specific behavior of Equal and Diff. In particular, +-// the fundamental Option functions (Ignore, Transformer, and Comparer), ++// Option configures for specific behavior of [Equal] and [Diff]. In particular, ++// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), + // configure how equality is determined. + // +-// The fundamental options may be composed with filters (FilterPath and +-// FilterValues) to control the scope over which they are applied. ++// The fundamental options may be composed with filters ([FilterPath] and ++// [FilterValues]) to control the scope over which they are applied. + // +-// The cmp/cmpopts package provides helper functions for creating options that +-// may be used with Equal and Diff. ++// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions ++// for creating options that may be used with [Equal] and [Diff]. + type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. +@@ -56,9 +56,9 @@ type core struct{} + + func (core) isCore() {} + +-// Options is a list of Option values that also satisfies the Option interface. ++// Options is a list of [Option] values that also satisfies the [Option] interface. + // Helper comparison packages may return an Options value when packing multiple +-// Option values into a single Option. When this package processes an Options, ++// [Option] values into a single [Option]. When this package processes an Options, + // it will be implicitly expanded into a flat list. + // + // Applying a filter on an Options is equivalent to applying that same filter +@@ -105,16 +105,16 @@ func (opts Options) String() string { + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) + } + +-// FilterPath returns a new Option where opt is only evaluated if filter f +-// returns true for the current Path in the value tree. ++// FilterPath returns a new [Option] where opt is only evaluated if filter f ++// returns true for the current [Path] in the value tree. + // + // This filter is called even if a slice element or map entry is missing and + // provides an opportunity to ignore such cases. The filter function must be + // symmetric such that the filter result is identical regardless of whether the + // missing value is from x or y. + // +-// The option passed in may be an Ignore, Transformer, Comparer, Options, or +-// a previously filtered Option. ++// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or ++// a previously filtered [Option]. 
+ func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") +@@ -142,7 +142,7 @@ func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) + } + +-// FilterValues returns a new Option where opt is only evaluated if filter f, ++// FilterValues returns a new [Option] where opt is only evaluated if filter f, + // which is a function of the form "func(T, T) bool", returns true for the + // current pair of values being compared. If either value is invalid or + // the type of the values is not assignable to T, then this filter implicitly +@@ -154,8 +154,8 @@ func (f pathFilter) String() string { + // If T is an interface, it is possible that f is called with two values with + // different concrete types that both implement T. + // +-// The option passed in may be an Ignore, Transformer, Comparer, Options, or +-// a previously filtered Option. ++// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or ++// a previously filtered [Option]. + func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { +@@ -192,9 +192,9 @@ func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) + } + +-// Ignore is an Option that causes all comparisons to be ignored. +-// This value is intended to be combined with FilterPath or FilterValues. +-// It is an error to pass an unfiltered Ignore option to Equal. ++// Ignore is an [Option] that causes all comparisons to be ignored. ++// This value is intended to be combined with [FilterPath] or [FilterValues]. ++// It is an error to pass an unfiltered Ignore option to [Equal]. + func Ignore() Option { return ignore{} } + + type ignore struct{ core } +@@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" ++ } else if t.Comparable() { ++ help = "consider using cmpopts.EquateComparable to compare comparable Go types" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. +@@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` + + var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +-// Transformer returns an Option that applies a transformation function that ++// Transformer returns an [Option] that applies a transformation function that + // converts values of a certain type into that of another. + // + // The transformer f must be a function "func(T) R" that converts values of +@@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + // same transform to the output of itself (e.g., in the case where the + // input and output types are the same), an implicit filter is added such that + // a transformer is applicable only if that exact transformer is not already +-// in the tail of the Path since the last non-Transform step. ++// in the tail of the [Path] since the last non-[Transform] step. + // For situations where the implicit filter is still insufficient, +-// consider using cmpopts.AcyclicTransformer, which adds a filter +-// to prevent the transformer from being recursively applied upon itself. 
++// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], ++// which adds a filter to prevent the transformer from ++// being recursively applied upon itself. + // +-// The name is a user provided label that is used as the Transform.Name in the +-// transformation PathStep (and eventually shown in the Diff output). ++// The name is a user provided label that is used as the [Transform.Name] in the ++// transformation [PathStep] (and eventually shown in the [Diff] output). + // The name must be a valid identifier or qualified identifier in Go syntax. + // If empty, an arbitrary name is used. + func Transformer(name string, f interface{}) Option { +@@ -329,7 +332,7 @@ func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) + } + +-// Comparer returns an Option that determines whether two values are equal ++// Comparer returns an [Option] that determines whether two values are equal + // to each other. + // + // The comparer f must be a function "func(T, T) bool" and is implicitly +@@ -377,35 +380,32 @@ func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) + } + +-// Exporter returns an Option that specifies whether Equal is allowed to ++// Exporter returns an [Option] that specifies whether [Equal] is allowed to + // introspect into the unexported fields of certain struct types. + // + // Users of this option must understand that comparing on unexported fields + // from external packages is not safe since changes in the internal +-// implementation of some external package may cause the result of Equal ++// implementation of some external package may cause the result of [Equal] + // to unexpectedly change. However, it may be valid to use this option on types + // defined in an internal package where the semantic meaning of an unexported + // field is in the control of the user. + // +-// In many cases, a custom Comparer should be used instead that defines ++// In many cases, a custom [Comparer] should be used instead that defines + // equality as a function of the public API of a type rather than the underlying + // unexported implementation. + // +-// For example, the reflect.Type documentation defines equality to be determined ++// For example, the [reflect.Type] documentation defines equality to be determined + // by the == operator on the interface (essentially performing a shallow pointer +-// comparison) and most attempts to compare *regexp.Regexp types are interested ++// comparison) and most attempts to compare *[regexp.Regexp] types are interested + // in only checking that the regular expression strings are equal. +-// Both of these are accomplished using Comparers: ++// Both of these are accomplished using [Comparer] options: + // + // Comparer(func(x, y reflect.Type) bool { return x == y }) + // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) + // +-// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +-// all unexported fields on specified struct types. ++// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] ++// option can be used to ignore all unexported fields on specified struct types. 
+ func Exporter(f func(reflect.Type) bool) Option { +- if !supportExporters { +- panic("Exporter is not supported on purego builds") +- } + return exporter(f) + } + +@@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO + panic("not implemented") + } + +-// AllowUnexported returns an Options that allows Equal to forcibly introspect ++// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect + // unexported fields of the specified struct types. + // +-// See Exporter for the proper use of this option. ++// See [Exporter] for the proper use of this option. + func AllowUnexported(types ...interface{}) Option { + m := make(map[reflect.Type]bool) + for _, typ := range types { +@@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { + } + + // Result represents the comparison result for a single node and +-// is provided by cmp when calling Report (see Reporter). ++// is provided by cmp when calling Report (see [Reporter]). + type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +@@ -445,7 +445,7 @@ func (r Result) Equal() bool { + } + + // ByIgnore reports whether the node is equal because it was ignored. +-// This never reports true if Equal reports false. ++// This never reports true if [Result.Equal] reports false. + func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 + } +@@ -455,7 +455,7 @@ func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 + } + +-// ByFunc reports whether a Comparer function determined equality. ++// ByFunc reports whether a [Comparer] function determined equality. + func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 + } +@@ -478,7 +478,7 @@ const ( + reportByCycle + ) + +-// Reporter is an Option that can be passed to Equal. When Equal traverses ++// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses + // the value trees, it calls PushStep as it descends into each node in the + // tree and PopStep as it ascend out of the node. The leaves of the tree are + // either compared (determined to be equal or not equal) or ignored and reported +diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go +index a0a588502ed..c3c1456423c 100644 +--- a/vendor/github.com/google/go-cmp/cmp/path.go ++++ b/vendor/github.com/google/go-cmp/cmp/path.go +@@ -14,9 +14,9 @@ import ( + "github.com/google/go-cmp/cmp/internal/value" + ) + +-// Path is a list of PathSteps describing the sequence of operations to get ++// Path is a list of [PathStep] describing the sequence of operations to get + // from some root type to the current position in the value tree. +-// The first Path element is always an operation-less PathStep that exists ++// The first Path element is always an operation-less [PathStep] that exists + // simply to identify the initial type. + // + // When traversing structs with embedded structs, the embedded struct will +@@ -29,8 +29,13 @@ type Path []PathStep + // a value's tree structure. Users of this package never need to implement + // these types as values of this type will be returned by this package. + // +-// Implementations of this interface are +-// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. 
++// Implementations of this interface: ++// - [StructField] ++// - [SliceIndex] ++// - [MapIndex] ++// - [Indirect] ++// - [TypeAssertion] ++// - [Transform] + type PathStep interface { + String() string + +@@ -70,8 +75,9 @@ func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] + } + +-// Last returns the last PathStep in the Path. +-// If the path is empty, this returns a non-nil PathStep that reports a nil Type. ++// Last returns the last [PathStep] in the Path. ++// If the path is empty, this returns a non-nil [PathStep] ++// that reports a nil [PathStep.Type]. + func (pa Path) Last() PathStep { + return pa.Index(-1) + } +@@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { + // Index returns the ith step in the Path and supports negative indexing. + // A negative index starts counting from the tail of the Path such that -1 + // refers to the last step, -2 refers to the second-to-last step, and so on. +-// If index is invalid, this returns a non-nil PathStep that reports a nil Type. ++// If index is invalid, this returns a non-nil [PathStep] ++// that reports a nil [PathStep.Type]. + func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i +@@ -168,7 +175,8 @@ func (ps pathStep) String() string { + return fmt.Sprintf("{%s}", s) + } + +-// StructField represents a struct field access on a field called Name. ++// StructField is a [PathStep] that represents a struct field access ++// on a field called [StructField.Name]. + type StructField struct{ *structField } + type structField struct { + pathStep +@@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + func (sf StructField) Name() string { return sf.name } + + // Index is the index of the field in the parent struct type. +-// See reflect.Type.Field. ++// See [reflect.Type.Field]. + func (sf StructField) Index() int { return sf.idx } + +-// SliceIndex is an index operation on a slice or array at some index Key. ++// SliceIndex is a [PathStep] that represents an index operation on ++// a slice or array at some index [SliceIndex.Key]. + type SliceIndex struct{ *sliceIndex } + type sliceIndex struct { + pathStep +@@ -247,12 +256,12 @@ func (si SliceIndex) Key() int { + // all of the indexes to be shifted. If an index is -1, then that + // indicates that the element does not exist in the associated slice. + // +-// Key is guaranteed to return -1 if and only if the indexes returned +-// by SplitKeys are not the same. SplitKeys will never return -1 for ++// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes ++// returned by SplitKeys are not the same. SplitKeys will never return -1 for + // both indexes. + func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +-// MapIndex is an index operation on a map at some index Key. ++// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. + type MapIndex struct{ *mapIndex } + type mapIndex struct { + pathStep +@@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", + // Key is the value of the map key. + func (mi MapIndex) Key() reflect.Value { return mi.key } + +-// Indirect represents pointer indirection on the parent type. ++// Indirect is a [PathStep] that represents pointer indirection on the parent type. 
+ type Indirect struct{ *indirect } + type indirect struct { + pathStep +@@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } + func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } + func (in Indirect) String() string { return "*" } + +-// TypeAssertion represents a type assertion on an interface. ++// TypeAssertion is a [PathStep] that represents a type assertion on an interface. + type TypeAssertion struct{ *typeAssertion } + type typeAssertion struct { + pathStep +@@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ } + func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } + func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } + +-// Transform is a transformation from the parent type to the current type. ++// Transform is a [PathStep] that represents a transformation ++// from the parent type to the current type. + type Transform struct{ *transform } + type transform struct { + pathStep +@@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } + func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } + func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +-// Name is the name of the Transformer. ++// Name is the name of the [Transformer]. + func (tf Transform) Name() string { return tf.trans.name } + + // Func is the function pointer to the transformer function. + func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +-// Option returns the originally constructed Transformer option. ++// Option returns the originally constructed [Transformer] option. + // The == operator can be used to detect the exact option used. 
+ func (tf Transform) Option() Option { return tf.trans } + +diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go +index 2ab41fad3fb..e39f42284ee 100644 +--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go ++++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go +@@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, + break + } + sf := t.Field(i) +- if supportExporters && !isExported(sf.Name) { ++ if !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) +diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore +new file mode 100644 +index 00000000000..01764d1cdf2 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/.gitignore +@@ -0,0 +1,6 @@ ++# Ignore binaries without extension ++//example/client/client ++//example/server/server ++//internal/v2/fakes2av2_server/fakes2av2_server ++ ++.idea/ +\ No newline at end of file +diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +new file mode 100644 +index 00000000000..dc079b4d66e +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +@@ -0,0 +1,93 @@ ++# Code of Conduct ++ ++## Our Pledge ++ ++In the interest of fostering an open and welcoming environment, we as ++contributors and maintainers pledge to making participation in our project and ++our community a harassment-free experience for everyone, regardless of age, body ++size, disability, ethnicity, gender identity and expression, level of ++experience, education, socio-economic status, nationality, personal appearance, ++race, religion, or sexual identity and orientation. ++ ++## Our Standards ++ ++Examples of behavior that contributes to creating a positive environment ++include: ++ ++* Using welcoming and inclusive language ++* Being respectful of differing viewpoints and experiences ++* Gracefully accepting constructive criticism ++* Focusing on what is best for the community ++* Showing empathy towards other community members ++ ++Examples of unacceptable behavior by participants include: ++ ++* The use of sexualized language or imagery and unwelcome sexual attention or ++ advances ++* Trolling, insulting/derogatory comments, and personal or political attacks ++* Public or private harassment ++* Publishing others' private information, such as a physical or electronic ++ address, without explicit permission ++* Other conduct which could reasonably be considered inappropriate in a ++ professional setting ++ ++## Our Responsibilities ++ ++Project maintainers are responsible for clarifying the standards of acceptable ++behavior and are expected to take appropriate and fair corrective action in ++response to any instances of unacceptable behavior. ++ ++Project maintainers have the right and responsibility to remove, edit, or reject ++comments, commits, code, wiki edits, issues, and other contributions that are ++not aligned to this Code of Conduct, or to ban temporarily or permanently any ++contributor for other behaviors that they deem inappropriate, threatening, ++offensive, or harmful. ++ ++## Scope ++ ++This Code of Conduct applies both within project spaces and in public spaces ++when an individual is representing the project or its community. 
Examples of ++representing a project or community include using an official project e-mail ++address, posting via an official social media account, or acting as an appointed ++representative at an online or offline event. Representation of a project may be ++further defined and clarified by project maintainers. ++ ++This Code of Conduct also applies outside the project spaces when the Project ++Steward has a reasonable belief that an individual's behavior may have a ++negative impact on the project or its community. ++ ++## Conflict Resolution ++ ++We do not believe that all conflict is bad; healthy debate and disagreement ++often yield positive results. However, it is never okay to be disrespectful or ++to engage in behavior that violates the project’s code of conduct. ++ ++If you see someone violating the code of conduct, you are encouraged to address ++the behavior directly with those involved. Many issues can be resolved quickly ++and easily, and this gives people more control over the outcome of their ++dispute. If you are unable to resolve the matter for any reason, or if the ++behavior is threatening or harassing, report it. We are dedicated to providing ++an environment where participants feel welcome and safe. ++ ++Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the ++Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to ++receive and address reported violations of the code of conduct. They will then ++work with a committee consisting of representatives from the Open Source ++Programs Office and the Google Open Source Strategy team. If for any reason you ++are uncomfortable reaching out to the Project Steward, please email ++opensource@google.com. ++ ++We will investigate every complaint, but you may not receive a direct response. ++We will use our discretion in determining when and how to follow up on reported ++incidents, which may range from not taking action to permanent expulsion from ++the project and project-sponsored spaces. We will notify the accused of the ++report and provide them an opportunity to discuss it before any action is taken. ++The identity of the reporter will be omitted from the details of the report ++supplied to the accused. In potentially harmful situations, such as ongoing ++harassment or threats to anyone's safety, we may take action without notice. ++ ++## Attribution ++ ++This Code of Conduct is adapted from the Contributor Covenant, version 1.4, ++available at ++https://www.contributor-covenant.org/version/1/4/code-of-conduct.html +diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md +new file mode 100644 +index 00000000000..22b241cb732 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/CONTRIBUTING.md +@@ -0,0 +1,29 @@ ++# How to Contribute ++ ++We'd love to accept your patches and contributions to this project. There are ++just a few small guidelines you need to follow. ++ ++## Contributor License Agreement ++ ++Contributions to this project must be accompanied by a Contributor License ++Agreement (CLA). You (or your employer) retain the copyright to your ++contribution; this simply gives us permission to use and redistribute your ++contributions as part of the project. Head over to ++<https://cla.developers.google.com/> to see your current agreements on file or ++to sign a new one. ++ ++You generally only need to submit a CLA once, so if you've already submitted one ++(even if it was for a different project), you probably don't need to do it ++again.
++ ++## Code reviews ++ ++All submissions, including submissions by project members, require review. We ++use GitHub pull requests for this purpose. Consult ++[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more ++information on using pull requests. ++ ++## Community Guidelines ++ ++This project follows ++[Google's Open Source Community Guidelines](https://opensource.google/conduct/). +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/github.com/google/s2a-go/LICENSE.md +similarity index 99% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE +rename to vendor/github.com/google/s2a-go/LICENSE.md +index 261eeb9e9f8..d6456956733 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE ++++ b/vendor/github.com/google/s2a-go/LICENSE.md +@@ -1,3 +1,4 @@ ++ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ +diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md +new file mode 100644 +index 00000000000..d566950f385 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/README.md +@@ -0,0 +1,17 @@ ++# Secure Session Agent Client Libraries ++ ++The Secure Session Agent is a service that enables a workload to offload select ++operations from the mTLS handshake and protects a workload's private key ++material from exfiltration. Specifically, the workload asks the Secure Session ++Agent for the TLS configuration to use during the handshake, to perform private ++key operations, and to validate the peer certificate chain. The Secure Session ++Agent's client libraries enable applications to communicate with the Secure ++Session Agent during the TLS handshake, and to encrypt traffic to the peer ++after the TLS handshake is complete. ++ ++This repository contains the source code for the Secure Session Agent's Go ++client libraries, which allow gRPC-Go applications to use the Secure Session ++Agent. This repository supports the Bazel and Golang build systems. ++ ++All code in this repository is experimental and subject to change. We do not ++guarantee API stability at this time. +diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +new file mode 100644 +index 00000000000..034d1b912ca +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +@@ -0,0 +1,167 @@ ++/* ++ * ++ * Copyright 2023 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package fallback provides default implementations of fallback options when S2A fails. 
++package fallback ++ ++import ( ++ "context" ++ "crypto/tls" ++ "fmt" ++ "net" ++ ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++) ++ ++const ( ++ alpnProtoStrH2 = "h2" ++ alpnProtoStrHTTP = "http/1.1" ++ defaultHTTPSPort = "443" ++) ++ ++// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. ++// It supports GRPC use case, thus the alpn is set to 'h2'. ++var FallbackTLSConfigGRPC = tls.Config{ ++ MinVersion: tls.VersionTLS13, ++ ClientSessionCache: nil, ++ NextProtos: []string{alpnProtoStrH2}, ++} ++ ++// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. ++// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. ++var FallbackTLSConfigHTTP = tls.Config{ ++ MinVersion: tls.VersionTLS13, ++ ClientSessionCache: nil, ++ NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, ++} ++ ++// ClientHandshake establishes a TLS connection and returns it, plus its auth info. ++// Inputs: ++// ++// targetServer: the server attempted with S2A. ++// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. ++// If fallback is successful, the `conn` should be closed. ++// err: the error encountered when performing the client-side TLS handshake with S2A. ++type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) ++ ++// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, ++// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. ++// Example use: ++// ++// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, ++// FallbackOpts: &s2a.FallbackOptions{ // optional ++// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), ++// }, ++// }) ++// ++// The fallback server's certificate must be verifiable using OS root store. ++// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, ++// it uses default port 443. ++// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, ++// and min TLS version is set to 1.3. 
++func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { ++ var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} ++ return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) ++} ++ ++func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { ++ fallbackServerAddr, err := processFallbackAddr(fallbackAddr) ++ if err != nil { ++ if grpclog.V(1) { ++ grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) ++ } ++ return nil, err ++ } ++ return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { ++ fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) ++ if fbErr != nil { ++ grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) ++ return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) ++ } ++ ++ tc, success := fbConn.(*tls.Conn) ++ if !success { ++ grpclog.Infof("the connection with fallback server is expected to be tls but isn't") ++ return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) ++ } ++ ++ tlsInfo := credentials.TLSInfo{ ++ State: tc.ConnectionState(), ++ CommonAuthInfo: credentials.CommonAuthInfo{ ++ SecurityLevel: credentials.PrivacyAndIntegrity, ++ }, ++ } ++ if grpclog.V(1) { ++ grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) ++ grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) ++ grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) ++ } ++ conn.Close() ++ return fbConn, tlsInfo, nil ++ }, nil ++} ++ ++// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. ++// Example use: ++// ++// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) ++// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, // required ++// FallbackOpts: &s2a.FallbackOptions{ ++// FallbackDialer: &s2a.FallbackDialer{ ++// Dialer: fallbackDialer, ++// ServerAddr: fallbackServerAddr, ++// }, ++// }, ++// }) ++// ++// The fallback server's certificate should be verifiable using OS root store. ++// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, ++// it uses default port 443. ++// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, ++// and min TLS version is set to 1.3. 
++func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { ++ fallbackServerAddr, err := processFallbackAddr(fallbackAddr) ++ if err != nil { ++ if grpclog.V(1) { ++ grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) ++ } ++ return nil, "", err ++ } ++ return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil ++} ++ ++func processFallbackAddr(fallbackAddr string) (string, error) { ++ var fallbackServerAddr string ++ var err error ++ ++ if fallbackAddr == "" { ++ return "", fmt.Errorf("empty fallback address") ++ } ++ _, _, err = net.SplitHostPort(fallbackAddr) ++ if err != nil { ++ // fallbackAddr does not have port suffix ++ fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) ++ } else { ++ // FallbackServerAddr already has port suffix ++ fallbackServerAddr = fallbackAddr ++ } ++ return fallbackServerAddr, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +new file mode 100644 +index 00000000000..aa3967f9d1f +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +@@ -0,0 +1,119 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package authinfo provides authentication and authorization information that ++// results from the TLS handshake. ++package authinfo ++ ++import ( ++ "errors" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" ++ grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "google.golang.org/grpc/credentials" ++) ++ ++var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) ++ ++const s2aAuthType = "s2a" ++ ++// S2AAuthInfo exposes authentication and authorization information from the ++// S2A session result to the gRPC stack. ++type S2AAuthInfo struct { ++ s2aContext *contextpb.S2AContext ++ commonAuthInfo credentials.CommonAuthInfo ++} ++ ++// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
++func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { ++ return newS2AAuthInfo(result) ++} ++ ++func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { ++ if result == nil { ++ return nil, errors.New("NewS2aAuthInfo given nil session result") ++ } ++ return &S2AAuthInfo{ ++ s2aContext: &contextpb.S2AContext{ ++ ApplicationProtocol: result.GetApplicationProtocol(), ++ TlsVersion: result.GetState().GetTlsVersion(), ++ Ciphersuite: result.GetState().GetTlsCiphersuite(), ++ PeerIdentity: result.GetPeerIdentity(), ++ LocalIdentity: result.GetLocalIdentity(), ++ PeerCertFingerprint: result.GetPeerCertFingerprint(), ++ LocalCertFingerprint: result.GetLocalCertFingerprint(), ++ IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), ++ }, ++ commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, ++ }, nil ++} ++ ++// AuthType returns the authentication type. ++func (s *S2AAuthInfo) AuthType() string { ++ return s2aAuthType ++} ++ ++// ApplicationProtocol returns the application protocol, e.g. "grpc". ++func (s *S2AAuthInfo) ApplicationProtocol() string { ++ return s.s2aContext.GetApplicationProtocol() ++} ++ ++// TLSVersion returns the TLS version negotiated during the handshake. ++func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { ++ return s.s2aContext.GetTlsVersion() ++} ++ ++// Ciphersuite returns the ciphersuite negotiated during the handshake. ++func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { ++ return s.s2aContext.GetCiphersuite() ++} ++ ++// PeerIdentity returns the authenticated identity of the peer. ++func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { ++ return s.s2aContext.GetPeerIdentity() ++} ++ ++// LocalIdentity returns the local identity of the application used during ++// session setup. ++func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { ++ return s.s2aContext.GetLocalIdentity() ++} ++ ++// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in ++// the S2A handshake. ++func (s *S2AAuthInfo) PeerCertFingerprint() []byte { ++ return s.s2aContext.GetPeerCertFingerprint() ++} ++ ++// LocalCertFingerprint returns the SHA256 hash of the local certificate used ++// in the S2A handshake. ++func (s *S2AAuthInfo) LocalCertFingerprint() []byte { ++ return s.s2aContext.GetLocalCertFingerprint() ++} ++ ++// IsHandshakeResumed returns true if a cached session was used to resume ++// the handshake. ++func (s *S2AAuthInfo) IsHandshakeResumed() bool { ++ return s.s2aContext.GetIsHandshakeResumed() ++} ++ ++// SecurityLevel returns the security level of the connection. ++func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { ++ return s.commonAuthInfo.SecurityLevel ++} +diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +new file mode 100644 +index 00000000000..8297c9a9746 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +@@ -0,0 +1,438 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package handshaker communicates with the S2A handshaker service. ++package handshaker ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "io" ++ "net" ++ "sync" ++ ++ "github.com/google/s2a-go/internal/authinfo" ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "github.com/google/s2a-go/internal/record" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ grpc "google.golang.org/grpc" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++) ++ ++var ( ++ // appProtocol contains the application protocol accepted by the handshaker. ++ appProtocol = "grpc" ++ // frameLimit is the maximum size of a frame in bytes. ++ frameLimit = 1024 * 64 ++ // peerNotRespondingError is the error thrown when the peer doesn't respond. ++ errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") ++) ++ ++// Handshaker defines a handshaker interface. ++type Handshaker interface { ++ // ClientHandshake starts and completes a TLS handshake from the client side, ++ // and returns a secure connection along with additional auth information. ++ ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) ++ // ServerHandshake starts and completes a TLS handshake from the server side, ++ // and returns a secure connection along with additional auth information. ++ ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) ++ // Close terminates the Handshaker. It should be called when the handshake ++ // is complete. ++ Close() error ++} ++ ++// ClientHandshakerOptions contains the options needed to configure the S2A ++// handshaker service on the client-side. ++type ClientHandshakerOptions struct { ++ // MinTLSVersion specifies the min TLS version supported by the client. ++ MinTLSVersion commonpb.TLSVersion ++ // MaxTLSVersion specifies the max TLS version supported by the client. ++ MaxTLSVersion commonpb.TLSVersion ++ // TLSCiphersuites is the ordered list of ciphersuites supported by the ++ // client. ++ TLSCiphersuites []commonpb.Ciphersuite ++ // TargetIdentities contains a list of allowed server identities. One of the ++ // target identities should match the peer identity in the handshake ++ // result; otherwise, the handshake fails. ++ TargetIdentities []*commonpb.Identity ++ // LocalIdentity is the local identity of the client application. If none is ++ // provided, then the S2A will choose the default identity. ++ LocalIdentity *commonpb.Identity ++ // TargetName is the allowed server name, which may be used for server ++ // authorization check by the S2A if it is provided. ++ TargetName string ++ // EnsureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ EnsureProcessSessionTickets *sync.WaitGroup ++} ++ ++// ServerHandshakerOptions contains the options needed to configure the S2A ++// handshaker service on the server-side. 
++type ServerHandshakerOptions struct { ++ // MinTLSVersion specifies the min TLS version supported by the server. ++ MinTLSVersion commonpb.TLSVersion ++ // MaxTLSVersion specifies the max TLS version supported by the server. ++ MaxTLSVersion commonpb.TLSVersion ++ // TLSCiphersuites is the ordered list of ciphersuites supported by the ++ // server. ++ TLSCiphersuites []commonpb.Ciphersuite ++ // LocalIdentities is the list of local identities that may be assumed by ++ // the server. If no local identity is specified, then the S2A chooses a ++ // default local identity. ++ LocalIdentities []*commonpb.Identity ++} ++ ++// s2aHandshaker performs a TLS handshake using the S2A handshaker service. ++type s2aHandshaker struct { ++ // stream is used to communicate with the S2A handshaker service. ++ stream s2apb.S2AService_SetUpSessionClient ++ // conn is the connection to the peer. ++ conn net.Conn ++ // clientOpts should be non-nil iff the handshaker is client-side. ++ clientOpts *ClientHandshakerOptions ++ // serverOpts should be non-nil iff the handshaker is server-side. ++ serverOpts *ServerHandshakerOptions ++ // isClient determines if the handshaker is client or server side. ++ isClient bool ++ // hsAddr stores the address of the S2A handshaker service. ++ hsAddr string ++ // tokenManager manages access tokens for authenticating to S2A. ++ tokenManager tokenmanager.AccessTokenManager ++ // localIdentities is the set of local identities for whom the ++ // tokenManager should fetch a token when preparing a request to be ++ // sent to S2A. ++ localIdentities []*commonpb.Identity ++} ++ ++// NewClientHandshaker creates an s2aHandshaker instance that performs a ++// client-side TLS handshake using the S2A handshaker service. ++func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { ++ stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) ++ if err != nil { ++ return nil, err ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil ++} ++ ++func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { ++ var localIdentities []*commonpb.Identity ++ if opts != nil { ++ localIdentities = []*commonpb.Identity{opts.LocalIdentity} ++ } ++ return &s2aHandshaker{ ++ stream: stream, ++ conn: c, ++ clientOpts: opts, ++ isClient: true, ++ hsAddr: hsAddr, ++ tokenManager: tokenManager, ++ localIdentities: localIdentities, ++ } ++} ++ ++// NewServerHandshaker creates an s2aHandshaker instance that performs a ++// server-side TLS handshake using the S2A handshaker service. 
++func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { ++ stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) ++ if err != nil { ++ return nil, err ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil ++} ++ ++func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { ++ var localIdentities []*commonpb.Identity ++ if opts != nil { ++ localIdentities = opts.LocalIdentities ++ } ++ return &s2aHandshaker{ ++ stream: stream, ++ conn: c, ++ serverOpts: opts, ++ isClient: false, ++ hsAddr: hsAddr, ++ tokenManager: tokenManager, ++ localIdentities: localIdentities, ++ } ++} ++ ++// ClientHandshake performs a client-side TLS handshake using the S2A handshaker ++// service. When complete, returns a TLS connection. ++func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { ++ if !h.isClient { ++ return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") ++ } ++ // Extract the hostname from the target name. The target name is assumed to be an authority. ++ hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) ++ if err != nil { ++ // If the target name had no host port or could not be parsed, use it as is. ++ hostname = h.clientOpts.TargetName ++ } ++ ++ // Prepare a client start message to send to the S2A handshaker service. ++ req := &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ClientStart{ ++ ClientStart: &s2apb.ClientSessionStartReq{ ++ ApplicationProtocols: []string{appProtocol}, ++ MinTlsVersion: h.clientOpts.MinTLSVersion, ++ MaxTlsVersion: h.clientOpts.MaxTLSVersion, ++ TlsCiphersuites: h.clientOpts.TLSCiphersuites, ++ TargetIdentities: h.clientOpts.TargetIdentities, ++ LocalIdentity: h.clientOpts.LocalIdentity, ++ TargetName: hostname, ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ } ++ conn, result, err := h.setUpSession(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ authInfo, err := authinfo.NewS2AAuthInfo(result) ++ if err != nil { ++ return nil, nil, err ++ } ++ return conn, authInfo, nil ++} ++ ++// ServerHandshake performs a server-side TLS handshake using the S2A handshaker ++// service. When complete, returns a TLS connection. ++func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { ++ if h.isClient { ++ return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") ++ } ++ p := make([]byte, frameLimit) ++ n, err := h.conn.Read(p) ++ if err != nil { ++ return nil, nil, err ++ } ++ // Prepare a server start message to send to the S2A handshaker service. 
++ req := &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ServerStart{ ++ ServerStart: &s2apb.ServerSessionStartReq{ ++ ApplicationProtocols: []string{appProtocol}, ++ MinTlsVersion: h.serverOpts.MinTLSVersion, ++ MaxTlsVersion: h.serverOpts.MaxTLSVersion, ++ TlsCiphersuites: h.serverOpts.TLSCiphersuites, ++ LocalIdentities: h.serverOpts.LocalIdentities, ++ InBytes: p[:n], ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ } ++ conn, result, err := h.setUpSession(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ authInfo, err := authinfo.NewS2AAuthInfo(result) ++ if err != nil { ++ return nil, nil, err ++ } ++ return conn, authInfo, nil ++} ++ ++// setUpSession proxies messages between the peer and the S2A handshaker ++// service. ++func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { ++ resp, err := h.accessHandshakerService(req) ++ if err != nil { ++ return nil, nil, err ++ } ++ // Check if the returned status is an error. ++ if resp.GetStatus() != nil { ++ if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { ++ return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) ++ } ++ } ++ // Calculate the extra unread bytes from the Session. Attempting to consume ++ // more than the bytes sent will throw an error. ++ var extra []byte ++ if req.GetServerStart() != nil { ++ if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { ++ return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") ++ } ++ extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] ++ } ++ result, extra, err := h.processUntilDone(resp, extra) ++ if err != nil { ++ return nil, nil, err ++ } ++ if result.GetLocalIdentity() == nil { ++ return nil, nil, errors.New("local identity must be populated in session result") ++ } ++ ++ // Create a new TLS record protocol using the Session Result. ++ newConn, err := record.NewConn(&record.ConnParameters{ ++ NetConn: h.conn, ++ Ciphersuite: result.GetState().GetTlsCiphersuite(), ++ TLSVersion: result.GetState().GetTlsVersion(), ++ InTrafficSecret: result.GetState().GetInKey(), ++ OutTrafficSecret: result.GetState().GetOutKey(), ++ UnusedBuf: extra, ++ InSequence: result.GetState().GetInSequence(), ++ OutSequence: result.GetState().GetOutSequence(), ++ HSAddr: h.hsAddr, ++ ConnectionID: result.GetState().GetConnectionId(), ++ LocalIdentity: result.GetLocalIdentity(), ++ EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), ++ }) ++ if err != nil { ++ return nil, nil, err ++ } ++ return newConn, result, nil ++} ++ ++func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { ++ if h.clientOpts == nil { ++ return nil ++ } ++ return h.clientOpts.EnsureProcessSessionTickets ++} ++ ++// accessHandshakerService sends the session request to the S2A handshaker ++// service and returns the session response. ++func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { ++ if err := h.stream.Send(req); err != nil { ++ return nil, err ++ } ++ resp, err := h.stream.Recv() ++ if err != nil { ++ return nil, err ++ } ++ return resp, nil ++} ++ ++// processUntilDone continues proxying messages between the peer and the S2A ++// handshaker service until the handshaker service returns the SessionResult at ++// the end of the handshake or an error occurs. 
++func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { ++ for { ++ if len(resp.OutFrames) > 0 { ++ if _, err := h.conn.Write(resp.OutFrames); err != nil { ++ return nil, nil, err ++ } ++ } ++ if resp.Result != nil { ++ return resp.Result, unusedBytes, nil ++ } ++ buf := make([]byte, frameLimit) ++ n, err := h.conn.Read(buf) ++ if err != nil && err != io.EOF { ++ return nil, nil, err ++ } ++ // If there is nothing to send to the handshaker service and nothing is ++ // received from the peer, then we are stuck. This covers the case when ++ // the peer is not responding. Note that handshaker service connection ++ // issues are caught in accessHandshakerService before we even get ++ // here. ++ if len(resp.OutFrames) == 0 && n == 0 { ++ return nil, nil, errPeerNotResponding ++ } ++ // Append extra bytes from the previous interaction with the handshaker ++ // service with the current buffer read from conn. ++ p := append(unusedBytes, buf[:n]...) ++ // From here on, p and unusedBytes point to the same slice. ++ resp, err = h.accessHandshakerService(&s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_Next{ ++ Next: &s2apb.SessionNextReq{ ++ InBytes: p, ++ }, ++ }, ++ AuthMechanisms: h.getAuthMechanisms(), ++ }) ++ if err != nil { ++ return nil, nil, err ++ } ++ ++ // Cache the local identity returned by S2A, if it is populated. This ++ // overwrites any existing local identities. This is done because, once the ++ // S2A has selected a local identity, then only that local identity should ++ // be asserted in future requests until the end of the current handshake. ++ if resp.GetLocalIdentity() != nil { ++ h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} ++ } ++ ++ // Set unusedBytes based on the handshaker service response. ++ if resp.GetBytesConsumed() > uint32(len(p)) { ++ return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") ++ } ++ unusedBytes = p[resp.GetBytesConsumed():] ++ } ++} ++ ++// Close shuts down the handshaker and the stream to the S2A handshaker service ++// when the handshake is complete. It should be called when the caller obtains ++// the secure connection at the end of the handshake. ++func (h *s2aHandshaker) Close() error { ++ return h.stream.CloseSend() ++} ++ ++func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { ++ if h.tokenManager == nil { ++ return nil ++ } ++ // First handle the special case when no local identities have been provided ++ // by the application. In this case, an AuthenticationMechanism with no local ++ // identity will be sent. ++ if len(h.localIdentities) == 0 { ++ token, err := h.tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ ++ // Next, handle the case where the application (or the S2A) has provided ++ // one or more local identities. 
++ var authMechanisms []*s2apb.AuthenticationMechanism ++ for _, localIdentity := range h.localIdentities { ++ token, err := h.tokenManager.Token(localIdentity) ++ if err != nil { ++ grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ ++ authMechanism := &s2apb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ } ++ authMechanisms = append(authMechanisms, authMechanism) ++ } ++ return authMechanisms ++} +diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +new file mode 100644 +index 00000000000..49573af887c +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +@@ -0,0 +1,99 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package service is a utility for calling the S2A handshaker service. ++package service ++ ++import ( ++ "context" ++ "net" ++ "os" ++ "strings" ++ "sync" ++ "time" ++ ++ "google.golang.org/appengine" ++ "google.golang.org/appengine/socket" ++ grpc "google.golang.org/grpc" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. ++const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" ++ ++var ( ++ // appEngineDialerHook is an AppEngine-specific dial option that is set ++ // during init time. If nil, then the application is not running on Google ++ // AppEngine. ++ appEngineDialerHook func(context.Context) grpc.DialOption ++ // mu guards hsConnMap and hsDialer. ++ mu sync.Mutex ++ // hsConnMap represents a mapping from an S2A handshaker service address ++ // to a corresponding connection to an S2A handshaker service instance. ++ hsConnMap = make(map[string]*grpc.ClientConn) ++ // hsDialer will be reassigned in tests. ++ hsDialer = grpc.Dial ++) ++ ++func init() { ++ if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { ++ return ++ } ++ appEngineDialerHook = func(ctx context.Context) grpc.DialOption { ++ return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { ++ return socket.DialTimeout(ctx, "tcp", addr, timeout) ++ }) ++ } ++} ++ ++// Dial dials the S2A handshaker service. If a connection has already been ++// established, this function returns it. Otherwise, a new connection is ++// created. ++func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { ++ mu.Lock() ++ defer mu.Unlock() ++ ++ hsConn, ok := hsConnMap[handshakerServiceAddress] ++ if !ok { ++ // Create a new connection to the S2A handshaker service. Note that ++ // this connection stays open until the application is closed. 
++ grpcOpts := []grpc.DialOption{ ++ grpc.WithInsecure(), ++ } ++ if enableAppEngineDialer() && appEngineDialerHook != nil { ++ if grpclog.V(1) { ++ grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") ++ } ++ grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) ++ } ++ var err error ++ hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) ++ if err != nil { ++ return nil, err ++ } ++ hsConnMap[handshakerServiceAddress] = hsConn ++ } ++ return hsConn, nil ++} ++ ++func enableAppEngineDialer() bool { ++ if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { ++ return true ++ } ++ return false ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +new file mode 100644 +index 00000000000..16278a1d995 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +@@ -0,0 +1,389 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/common/common.proto ++ ++package common_go_proto ++ ++import ( ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++// The ciphersuites supported by S2A. The name determines the confidentiality, ++// and authentication ciphers as well as the hash algorithm used for PRF in ++// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: ++// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. ++// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. ++type Ciphersuite int32 ++ ++const ( ++ Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 ++ Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 ++ Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 ++) ++ ++// Enum value maps for Ciphersuite. 
++var ( ++ Ciphersuite_name = map[int32]string{ ++ 0: "AES_128_GCM_SHA256", ++ 1: "AES_256_GCM_SHA384", ++ 2: "CHACHA20_POLY1305_SHA256", ++ } ++ Ciphersuite_value = map[string]int32{ ++ "AES_128_GCM_SHA256": 0, ++ "AES_256_GCM_SHA384": 1, ++ "CHACHA20_POLY1305_SHA256": 2, ++ } ++) ++ ++func (x Ciphersuite) Enum() *Ciphersuite { ++ p := new(Ciphersuite) ++ *p = x ++ return p ++} ++ ++func (x Ciphersuite) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() ++} ++ ++func (Ciphersuite) Type() protoreflect.EnumType { ++ return &file_internal_proto_common_common_proto_enumTypes[0] ++} ++ ++func (x Ciphersuite) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use Ciphersuite.Descriptor instead. ++func (Ciphersuite) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++// The TLS versions supported by S2A's handshaker module. ++type TLSVersion int32 ++ ++const ( ++ TLSVersion_TLS1_2 TLSVersion = 0 ++ TLSVersion_TLS1_3 TLSVersion = 1 ++) ++ ++// Enum value maps for TLSVersion. ++var ( ++ TLSVersion_name = map[int32]string{ ++ 0: "TLS1_2", ++ 1: "TLS1_3", ++ } ++ TLSVersion_value = map[string]int32{ ++ "TLS1_2": 0, ++ "TLS1_3": 1, ++ } ++) ++ ++func (x TLSVersion) Enum() *TLSVersion { ++ p := new(TLSVersion) ++ *p = x ++ return p ++} ++ ++func (x TLSVersion) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() ++} ++ ++func (TLSVersion) Type() protoreflect.EnumType { ++ return &file_internal_proto_common_common_proto_enumTypes[1] ++} ++ ++func (x TLSVersion) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use TLSVersion.Descriptor instead. ++func (TLSVersion) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} ++} ++ ++type Identity struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to IdentityOneof: ++ // ++ // *Identity_SpiffeId ++ // *Identity_Hostname ++ // *Identity_Uid ++ // *Identity_MdbUsername ++ // *Identity_GaiaId ++ IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` ++ // Additional identity-specific attributes. 
++ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` ++} ++ ++func (x *Identity) Reset() { ++ *x = Identity{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_common_common_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *Identity) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*Identity) ProtoMessage() {} ++ ++func (x *Identity) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_common_common_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use Identity.ProtoReflect.Descriptor instead. ++func (*Identity) Descriptor() ([]byte, []int) { ++ return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { ++ if m != nil { ++ return m.IdentityOneof ++ } ++ return nil ++} ++ ++func (x *Identity) GetSpiffeId() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { ++ return x.SpiffeId ++ } ++ return "" ++} ++ ++func (x *Identity) GetHostname() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { ++ return x.Hostname ++ } ++ return "" ++} ++ ++func (x *Identity) GetUid() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { ++ return x.Uid ++ } ++ return "" ++} ++ ++func (x *Identity) GetMdbUsername() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { ++ return x.MdbUsername ++ } ++ return "" ++} ++ ++func (x *Identity) GetGaiaId() string { ++ if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { ++ return x.GaiaId ++ } ++ return "" ++} ++ ++func (x *Identity) GetAttributes() map[string]string { ++ if x != nil { ++ return x.Attributes ++ } ++ return nil ++} ++ ++type isIdentity_IdentityOneof interface { ++ isIdentity_IdentityOneof() ++} ++ ++type Identity_SpiffeId struct { ++ // The SPIFFE ID of a connection endpoint. ++ SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` ++} ++ ++type Identity_Hostname struct { ++ // The hostname of a connection endpoint. ++ Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` ++} ++ ++type Identity_Uid struct { ++ // The UID of a connection endpoint. ++ Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` ++} ++ ++type Identity_MdbUsername struct { ++ // The MDB username of a connection endpoint. ++ MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` ++} ++ ++type Identity_GaiaId struct { ++ // The Gaia ID of a connection endpoint. 
++ GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` ++} ++ ++func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} ++ ++func (*Identity_Hostname) isIdentity_IdentityOneof() {} ++ ++func (*Identity_Uid) isIdentity_IdentityOneof() {} ++ ++func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} ++ ++func (*Identity_GaiaId) isIdentity_IdentityOneof() {} ++ ++var File_internal_proto_common_common_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_common_common_proto_rawDesc = []byte{ ++ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, ++ 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, ++ 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, ++ 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, ++ 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, ++ 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, ++ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, ++ 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, ++ 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, ++ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, ++ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, ++ 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, ++ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, ++ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, ++ 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, ++ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, ++ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, ++ 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, ++ 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, ++ 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, ++ 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, ++ 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, ++ 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, ++ 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, ++ 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, ++ 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 
0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, ++ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, ++ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, ++ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_common_common_proto_rawDescOnce sync.Once ++ file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc ++) ++ ++func file_internal_proto_common_common_proto_rawDescGZIP() []byte { ++ file_internal_proto_common_common_proto_rawDescOnce.Do(func() { ++ file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) ++ }) ++ return file_internal_proto_common_common_proto_rawDescData ++} ++ ++var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) ++var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) ++var file_internal_proto_common_common_proto_goTypes = []interface{}{ ++ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite ++ (TLSVersion)(0), // 1: s2a.proto.TLSVersion ++ (*Identity)(nil), // 2: s2a.proto.Identity ++ nil, // 3: s2a.proto.Identity.AttributesEntry ++} ++var file_internal_proto_common_common_proto_depIdxs = []int32{ ++ 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry ++ 1, // [1:1] is the sub-list for method output_type ++ 1, // [1:1] is the sub-list for method input_type ++ 1, // [1:1] is the sub-list for extension type_name ++ 1, // [1:1] is the sub-list for extension extendee ++ 0, // [0:1] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_common_common_proto_init() } ++func file_internal_proto_common_common_proto_init() { ++ if File_internal_proto_common_common_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*Identity); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ ++ (*Identity_SpiffeId)(nil), ++ (*Identity_Hostname)(nil), ++ (*Identity_Uid)(nil), ++ (*Identity_MdbUsername)(nil), ++ (*Identity_GaiaId)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_common_common_proto_rawDesc, ++ NumEnums: 2, ++ NumMessages: 2, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_common_common_proto_goTypes, ++ DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, ++ EnumInfos: file_internal_proto_common_common_proto_enumTypes, ++ MessageInfos: file_internal_proto_common_common_proto_msgTypes, ++ }.Build() ++ File_internal_proto_common_common_proto = out.File ++ file_internal_proto_common_common_proto_rawDesc = nil ++ file_internal_proto_common_common_proto_goTypes = nil ++ file_internal_proto_common_common_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +new file mode 100644 +index 
00000000000..f4f763ae102 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +@@ -0,0 +1,267 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/s2a_context/s2a_context.proto ++ ++package s2a_context_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type S2AContext struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocol negotiated for this connection, e.g., 'grpc'. ++ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` ++ // The TLS version number that the S2A's handshaker module used to set up the ++ // session. ++ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` ++ // The TLS ciphersuite negotiated by the S2A's handshaker module. ++ Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` ++ // The authenticated identity of the peer. ++ PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the peer certificate used in the handshake. ++ PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` ++ // The SHA256 hash of the local certificate used in the handshake. 
++ LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` ++ // Set to true if a cached session was reused to resume the handshake. ++ IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` ++} ++ ++func (x *S2AContext) Reset() { ++ *x = S2AContext{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *S2AContext) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*S2AContext) ProtoMessage() {} ++ ++func (x *S2AContext) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. ++func (*S2AContext) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *S2AContext) GetApplicationProtocol() string { ++ if x != nil { ++ return x.ApplicationProtocol ++ } ++ return "" ++} ++ ++func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.TlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuite ++ } ++ return common_go_proto.Ciphersuite(0) ++} ++ ++func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.PeerIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetPeerCertFingerprint() []byte { ++ if x != nil { ++ return x.PeerCertFingerprint ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalCertFingerprint ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetIsHandshakeResumed() bool { ++ if x != nil { ++ return x.IsHandshakeResumed ++ } ++ return false ++} ++ ++var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ ++ 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, ++ 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, ++ 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, ++ 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, ++ 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, ++ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, ++ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, ++ 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, ++ 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, ++ 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, ++ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, ++ 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, ++ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, ++ 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, ++ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, ++ 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, ++ 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, ++ 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, ++ 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, ++ 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, ++ 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, ++ 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, ++ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, ++ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc ++) ++ ++func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { ++ file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) ++ }) ++ return file_internal_proto_s2a_context_s2a_context_proto_rawDescData ++} ++ ++var 
file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) ++var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ ++ (*S2AContext)(nil), // 0: s2a.proto.S2AContext ++ (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite ++ (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity ++} ++var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ ++ 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion ++ 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite ++ 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity ++ 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity ++ 4, // [4:4] is the sub-list for method output_type ++ 4, // [4:4] is the sub-list for method input_type ++ 4, // [4:4] is the sub-list for extension type_name ++ 4, // [4:4] is the sub-list for extension extendee ++ 0, // [0:4] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } ++func file_internal_proto_s2a_context_s2a_context_proto_init() { ++ if File_internal_proto_s2a_context_s2a_context_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*S2AContext); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 1, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, ++ DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, ++ MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, ++ }.Build() ++ File_internal_proto_s2a_context_s2a_context_proto = out.File ++ file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil ++ file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil ++ file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +new file mode 100644 +index 00000000000..0a86ebee592 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +@@ -0,0 +1,1377 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. 
++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type AuthenticationMechanism struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // (Optional) Application may specify an identity associated to an ++ // authentication mechanism. Otherwise, S2A assumes that the authentication ++ // mechanism is associated with the default identity. If the default identity ++ // cannot be determined, session setup fails. ++ Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` ++ // Types that are assignable to MechanismOneof: ++ // ++ // *AuthenticationMechanism_Token ++ MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` ++} ++ ++func (x *AuthenticationMechanism) Reset() { ++ *x = AuthenticationMechanism{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AuthenticationMechanism) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AuthenticationMechanism) ProtoMessage() {} ++ ++func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. ++func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.Identity ++ } ++ return nil ++} ++ ++func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { ++ if m != nil { ++ return m.MechanismOneof ++ } ++ return nil ++} ++ ++func (x *AuthenticationMechanism) GetToken() string { ++ if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { ++ return x.Token ++ } ++ return "" ++} ++ ++type isAuthenticationMechanism_MechanismOneof interface { ++ isAuthenticationMechanism_MechanismOneof() ++} ++ ++type AuthenticationMechanism_Token struct { ++ // A token that the application uses to authenticate itself to the S2A. ++ Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` ++} ++ ++func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} ++ ++type ClientSessionStartReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocols supported by the client, e.g., "grpc". 
++ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` ++ // (Optional) The minimum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the minimum version it supports. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` ++ // (Optional) The maximum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the maximum version it supports. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` ++ // The TLS ciphersuites that the client is willing to support. ++ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` ++ // (Optional) Describes which server identities are acceptable by the client. ++ // If target identities are provided and none of them matches the peer ++ // identity of the server, session setup fails. ++ TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` ++ // (Optional) Application may specify a local identity. Otherwise, S2A chooses ++ // the default local identity. If the default identity cannot be determined, ++ // session setup fails. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The target name that is used by S2A to configure SNI in the TLS handshake. ++ // It is also used to perform server authorization check if avaiable. This ++ // check is intended to verify that the peer authenticated identity is ++ // authorized to run a service with the target name. ++ // This field MUST only contain the host portion of the server address. It ++ // MUST not contain the scheme or the port number. For example, if the server ++ // address is dns://www.example.com:443, the value of this field should be ++ // set to www.example.com. ++ TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` ++} ++ ++func (x *ClientSessionStartReq) Reset() { ++ *x = ClientSessionStartReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ClientSessionStartReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ClientSessionStartReq) ProtoMessage() {} ++ ++func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead. 
++func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} ++} ++ ++func (x *ClientSessionStartReq) GetApplicationProtocols() []string { ++ if x != nil { ++ return x.ApplicationProtocols ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuites ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { ++ if x != nil { ++ return x.TargetIdentities ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *ClientSessionStartReq) GetTargetName() string { ++ if x != nil { ++ return x.TargetName ++ } ++ return "" ++} ++ ++type ServerSessionStartReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocols supported by the server, e.g., "grpc". ++ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` ++ // (Optional) The minimum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the minimum version it supports. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` ++ // (Optional) The maximum TLS version number that the S2A's handshaker module ++ // will use to set up the session. If this field is not provided, S2A will use ++ // the maximum version it supports. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` ++ // The TLS ciphersuites that the server is willing to support. ++ TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` ++ // (Optional) A list of local identities supported by the server, if ++ // specified. Otherwise, S2A chooses the default local identity. If the ++ // default identity cannot be determined, session setup fails. ++ LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` ++ // The byte representation of the first handshake message received from the ++ // client peer. It is possible that this first message is split into multiple ++ // chunks. In this case, the first chunk is sent using this field and the ++ // following chunks are sent using the in_bytes field of SessionNextReq ++ // Specifically, if the client peer is using S2A, this field contains the ++ // bytes in the out_frames field of SessionResp message that the client peer ++ // received from its S2A after initiating the handshake. 
++ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *ServerSessionStartReq) Reset() { ++ *x = ServerSessionStartReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ServerSessionStartReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ServerSessionStartReq) ProtoMessage() {} ++ ++func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. ++func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} ++} ++ ++func (x *ServerSessionStartReq) GetApplicationProtocols() []string { ++ if x != nil { ++ return x.ApplicationProtocols ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuites ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentities ++ } ++ return nil ++} ++ ++func (x *ServerSessionStartReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type SessionNextReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The byte representation of session setup, i.e., handshake messages. ++ // Specifically: ++ // - All handshake messages sent from the server to the client. ++ // - All, except for the first, handshake messages sent from the client to ++ // the server. Note that the first message is communicated to S2A using the ++ // in_bytes field of ServerSessionStartReq. ++ // ++ // If the peer is using S2A, this field contains the bytes in the out_frames ++ // field of SessionResp message that the peer received from its S2A. 
++ InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *SessionNextReq) Reset() { ++ *x = SessionNextReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionNextReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionNextReq) ProtoMessage() {} ++ ++func (x *SessionNextReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. ++func (*SessionNextReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} ++} ++ ++func (x *SessionNextReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type ResumptionTicketReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The byte representation of a NewSessionTicket message received from the ++ // server. ++ InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++ // A connection identifier that was created and sent by S2A at the end of a ++ // handshake. ++ ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` ++ // The local identity that was used by S2A during session setup and included ++ // in |SessionResult|. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++} ++ ++func (x *ResumptionTicketReq) Reset() { ++ *x = ResumptionTicketReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ResumptionTicketReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ResumptionTicketReq) ProtoMessage() {} ++ ++func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. 
++func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} ++} ++ ++func (x *ResumptionTicketReq) GetInBytes() [][]byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++func (x *ResumptionTicketReq) GetConnectionId() uint64 { ++ if x != nil { ++ return x.ConnectionId ++ } ++ return 0 ++} ++ ++func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++type SessionReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to ReqOneof: ++ // ++ // *SessionReq_ClientStart ++ // *SessionReq_ServerStart ++ // *SessionReq_Next ++ // *SessionReq_ResumptionTicket ++ ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` ++ // (Optional) The authentication mechanisms that the client wishes to use to ++ // authenticate to the S2A, ordered by preference. The S2A will always use the ++ // first authentication mechanism that appears in the list and is supported by ++ // the S2A. ++ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` ++} ++ ++func (x *SessionReq) Reset() { ++ *x = SessionReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionReq) ProtoMessage() {} ++ ++func (x *SessionReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. ++func (*SessionReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} ++} ++ ++func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { ++ if m != nil { ++ return m.ReqOneof ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetClientStart() *ClientSessionStartReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { ++ return x.ClientStart ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetServerStart() *ServerSessionStartReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { ++ return x.ServerStart ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetNext() *SessionNextReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { ++ return x.Next ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { ++ return x.ResumptionTicket ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { ++ if x != nil { ++ return x.AuthMechanisms ++ } ++ return nil ++} ++ ++type isSessionReq_ReqOneof interface { ++ isSessionReq_ReqOneof() ++} ++ ++type SessionReq_ClientStart struct { ++ // The client session setup request message. ++ ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` ++} ++ ++type SessionReq_ServerStart struct { ++ // The server session setup request message. 
++ ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` ++} ++ ++type SessionReq_Next struct { ++ // The next session setup message request message. ++ Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` ++} ++ ++type SessionReq_ResumptionTicket struct { ++ // The resumption ticket that is received from the server. This message is ++ // only accepted by S2A if it is running as a client and if it is received ++ // after session setup is complete. If S2A is running as a server and it ++ // receives this message, the session is terminated. ++ ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` ++} ++ ++func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_Next) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} ++ ++type SessionState struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The TLS version number that the S2A's handshaker module used to set up the ++ // session. ++ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` ++ // The TLS ciphersuite negotiated by the S2A's handshaker module. ++ TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` ++ // The sequence number of the next, incoming, TLS record. ++ InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` ++ // The sequence number of the next, outgoing, TLS record. ++ OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` ++ // The key for the inbound direction. ++ InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` ++ // The key for the outbound direction. ++ OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` ++ // The constant part of the record nonce for the outbound direction. ++ InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` ++ // The constant part of the record nonce for the inbound direction. ++ OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` ++ // A connection identifier that can be provided to S2A to perform operations ++ // related to this connection. This identifier will be stored by the record ++ // protocol, and included in the |ResumptionTicketReq| message that is later ++ // sent back to S2A. This field is set only for client-side connections. ++ ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` ++ // Set to true if a cached session was reused to do an abbreviated handshake. 
++ IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` ++} ++ ++func (x *SessionState) Reset() { ++ *x = SessionState{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionState) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionState) ProtoMessage() {} ++ ++func (x *SessionState) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. ++func (*SessionState) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} ++} ++ ++func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.TlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.TlsCiphersuite ++ } ++ return common_go_proto.Ciphersuite(0) ++} ++ ++func (x *SessionState) GetInSequence() uint64 { ++ if x != nil { ++ return x.InSequence ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetOutSequence() uint64 { ++ if x != nil { ++ return x.OutSequence ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetInKey() []byte { ++ if x != nil { ++ return x.InKey ++ } ++ return nil ++} ++ ++func (x *SessionState) GetOutKey() []byte { ++ if x != nil { ++ return x.OutKey ++ } ++ return nil ++} ++ ++func (x *SessionState) GetInFixedNonce() []byte { ++ if x != nil { ++ return x.InFixedNonce ++ } ++ return nil ++} ++ ++func (x *SessionState) GetOutFixedNonce() []byte { ++ if x != nil { ++ return x.OutFixedNonce ++ } ++ return nil ++} ++ ++func (x *SessionState) GetConnectionId() uint64 { ++ if x != nil { ++ return x.ConnectionId ++ } ++ return 0 ++} ++ ++func (x *SessionState) GetIsHandshakeResumed() bool { ++ if x != nil { ++ return x.IsHandshakeResumed ++ } ++ return false ++} ++ ++type SessionResult struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The application protocol negotiated for this session. ++ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` ++ // The session state at the end. This state contains all cryptographic ++ // material required to initialize the record protocol object. ++ State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` ++ // The authenticated identity of the peer. ++ PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. 
++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the local certificate used in the handshake. ++ LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` ++ // The SHA256 hash of the peer certificate used in the handshake. ++ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` ++} ++ ++func (x *SessionResult) Reset() { ++ *x = SessionResult{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResult) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResult) ProtoMessage() {} ++ ++func (x *SessionResult) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. ++func (*SessionResult) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} ++} ++ ++func (x *SessionResult) GetApplicationProtocol() string { ++ if x != nil { ++ return x.ApplicationProtocol ++ } ++ return "" ++} ++ ++func (x *SessionResult) GetState() *SessionState { ++ if x != nil { ++ return x.State ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.PeerIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetLocalCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalCertFingerprint ++ } ++ return nil ++} ++ ++func (x *SessionResult) GetPeerCertFingerprint() []byte { ++ if x != nil { ++ return x.PeerCertFingerprint ++ } ++ return nil ++} ++ ++type SessionStatus struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The status code that is specific to the application and the implementation ++ // of S2A, e.g., gRPC status code. ++ Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` ++ // The status details. 
++ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` ++} ++ ++func (x *SessionStatus) Reset() { ++ *x = SessionStatus{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionStatus) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionStatus) ProtoMessage() {} ++ ++func (x *SessionStatus) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. ++func (*SessionStatus) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} ++} ++ ++func (x *SessionStatus) GetCode() uint32 { ++ if x != nil { ++ return x.Code ++ } ++ return 0 ++} ++ ++func (x *SessionStatus) GetDetails() string { ++ if x != nil { ++ return x.Details ++ } ++ return "" ++} ++ ++type SessionResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The local identity used during session setup. This could be: ++ // - The local identity that the client specifies in ClientSessionStartReq. ++ // - One of the local identities that the server specifies in ++ // ServerSessionStartReq. ++ // - If neither client or server specifies local identities, the S2A picks the ++ // default one. In this case, this field will contain that identity. ++ // ++ // If the SessionResult is populated, then this must coincide with the local ++ // identity specified in the SessionResult; otherwise, the handshake must ++ // fail. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The byte representation of the frames that should be sent to the peer. May ++ // be empty if nothing needs to be sent to the peer or if in_bytes in the ++ // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent ++ // to the peer even if the session setup status is not OK as these frames may ++ // contain appropriate alerts. ++ OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` ++ // Number of bytes in the in_bytes field that are consumed by S2A. It is ++ // possible that part of in_bytes is unrelated to the session setup process. ++ BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` ++ // This is set if the session is successfully set up. out_frames may ++ // still be set to frames that needs to be forwarded to the peer. ++ Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` ++ // Status of session setup at the current stage. 
++ Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` ++} ++ ++func (x *SessionResp) Reset() { ++ *x = SessionResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResp) ProtoMessage() {} ++ ++func (x *SessionResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. ++func (*SessionResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} ++} ++ ++func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOutFrames() []byte { ++ if x != nil { ++ return x.OutFrames ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetBytesConsumed() uint32 { ++ if x != nil { ++ return x.BytesConsumed ++ } ++ return 0 ++} ++ ++func (x *SessionResp) GetResult() *SessionResult { ++ if x != nil { ++ return x.Result ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetStatus() *SessionStatus { ++ if x != nil { ++ return x.Status ++ } ++ return nil ++} ++ ++var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ ++ 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, ++ 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, ++ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, ++ 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, ++ 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, ++ 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, ++ 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, ++ 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, ++ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, ++ 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, ++ 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, ++ 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, ++ 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, ++ 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, ++ 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, ++ 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, ++ 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, ++ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, ++ 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, ++ 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, ++ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, ++ 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, ++ 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, ++ 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, ++ 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, ++ 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, ++ 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, ++ 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, ++ 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, ++ 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, ++ 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, ++ 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, ++ 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, ++ 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, ++ 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, ++ 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, ++ 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, ++ 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, ++ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, ++ 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, ++ 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, ++ 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, ++ 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, ++ 0x69, 
0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, ++ 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, ++ 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, ++ 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, ++ 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, ++ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, ++ 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, ++ 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, ++ 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, ++ 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, ++ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, ++ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, ++ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, ++ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, ++ 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, ++ 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, ++ 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, ++ 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, ++ 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, ++ 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, ++ 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, ++ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, ++ 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, ++ 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, ++ 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, ++ 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
++ 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, ++ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, ++ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, ++ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, ++ 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, ++ 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, ++ 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, ++ 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, ++ 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, ++ 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, ++ 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, ++ 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, ++ 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, ++ 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, ++ 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, ++ 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, ++ 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, ++ 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, ++ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, ++ 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, ++ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, ++ 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, ++ 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, ++ 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, ++ 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, ++ 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, ++ 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x0b, ++ 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, ++ 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, ++ 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, ++ 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, ++ 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, ++ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, ++ 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, ++ 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once ++ file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc ++) ++ ++func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { ++ file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { ++ file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) ++ }) ++ return file_internal_proto_s2a_s2a_proto_rawDescData ++} ++ ++var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) ++var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ ++ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism ++ (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq ++ (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq ++ (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq ++ (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq ++ (*SessionReq)(nil), // 5: s2a.proto.SessionReq ++ (*SessionState)(nil), // 6: s2a.proto.SessionState ++ (*SessionResult)(nil), // 7: s2a.proto.SessionResult ++ (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus ++ (*SessionResp)(nil), // 9: s2a.proto.SessionResp ++ (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity ++ (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite ++} ++var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ ++ 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity ++ 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion ++ 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite ++ 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity ++ 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity ++ 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion ++ 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite ++ 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> 
s2a.proto.Identity ++ 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity ++ 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq ++ 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq ++ 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq ++ 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq ++ 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism ++ 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion ++ 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite ++ 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState ++ 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity ++ 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity ++ 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity ++ 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult ++ 8, // 23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus ++ 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq ++ 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp ++ 25, // [25:26] is the sub-list for method output_type ++ 24, // [24:25] is the sub-list for method input_type ++ 24, // [24:24] is the sub-list for extension type_name ++ 24, // [24:24] is the sub-list for extension extendee ++ 0, // [0:24] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_s2a_s2a_proto_init() } ++func file_internal_proto_s2a_s2a_proto_init() { ++ if File_internal_proto_s2a_s2a_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AuthenticationMechanism); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ClientSessionStartReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ServerSessionStartReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionNextReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ResumptionTicketReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ 
default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionState); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResult); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionStatus); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ ++ (*AuthenticationMechanism_Token)(nil), ++ } ++ file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ ++ (*SessionReq_ClientStart)(nil), ++ (*SessionReq_ServerStart)(nil), ++ (*SessionReq_Next)(nil), ++ (*SessionReq_ResumptionTicket)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 10, ++ NumExtensions: 0, ++ NumServices: 1, ++ }, ++ GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, ++ DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, ++ MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, ++ }.Build() ++ File_internal_proto_s2a_s2a_proto = out.File ++ file_internal_proto_s2a_s2a_proto_rawDesc = nil ++ file_internal_proto_s2a_s2a_proto_goTypes = nil ++ file_internal_proto_s2a_s2a_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +new file mode 100644 +index 00000000000..0fa582fc874 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +@@ -0,0 +1,173 @@ ++// Copyright 2021 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
++// versions: ++// - protoc-gen-go-grpc v1.3.0 ++// - protoc v3.21.12 ++// source: internal/proto/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ context "context" ++ grpc "google.golang.org/grpc" ++ codes "google.golang.org/grpc/codes" ++ status "google.golang.org/grpc/status" ++) ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++// Requires gRPC-Go v1.32.0 or later. ++const _ = grpc.SupportPackageIsVersion7 ++ ++const ( ++ S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" ++) ++ ++// S2AServiceClient is the client API for S2AService service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. ++type S2AServiceClient interface { ++ // S2A service accepts a stream of session setup requests and returns a stream ++ // of session setup responses. The client of this service is expected to send ++ // exactly one client_start or server_start message followed by at least one ++ // next message. Applications running TLS clients can send requests with ++ // resumption_ticket messages only after the session is successfully set up. ++ // ++ // Every time S2A client sends a request, this service sends a response. ++ // However, clients do not have to wait for service response before sending ++ // the next request. ++ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) ++} ++ ++type s2AServiceClient struct { ++ cc grpc.ClientConnInterface ++} ++ ++func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { ++ return &s2AServiceClient{cc} ++} ++ ++func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { ++ stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &s2AServiceSetUpSessionClient{stream} ++ return x, nil ++} ++ ++type S2AService_SetUpSessionClient interface { ++ Send(*SessionReq) error ++ Recv() (*SessionResp, error) ++ grpc.ClientStream ++} ++ ++type s2AServiceSetUpSessionClient struct { ++ grpc.ClientStream ++} ++ ++func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { ++ return x.ClientStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { ++ m := new(SessionResp) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AServiceServer is the server API for S2AService service. ++// All implementations must embed UnimplementedS2AServiceServer ++// for forward compatibility ++type S2AServiceServer interface { ++ // S2A service accepts a stream of session setup requests and returns a stream ++ // of session setup responses. The client of this service is expected to send ++ // exactly one client_start or server_start message followed by at least one ++ // next message. Applications running TLS clients can send requests with ++ // resumption_ticket messages only after the session is successfully set up. ++ // ++ // Every time S2A client sends a request, this service sends a response. ++ // However, clients do not have to wait for service response before sending ++ // the next request. 
++ SetUpSession(S2AService_SetUpSessionServer) error ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. ++type UnimplementedS2AServiceServer struct { ++} ++ ++func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { ++ return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") ++} ++func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} ++ ++// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. ++// Use of this interface is not recommended, as added methods to S2AServiceServer will ++// result in compilation errors. ++type UnsafeS2AServiceServer interface { ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { ++ s.RegisterService(&S2AService_ServiceDesc, srv) ++} ++ ++func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { ++ return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) ++} ++ ++type S2AService_SetUpSessionServer interface { ++ Send(*SessionResp) error ++ Recv() (*SessionReq, error) ++ grpc.ServerStream ++} ++ ++type s2AServiceSetUpSessionServer struct { ++ grpc.ServerStream ++} ++ ++func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { ++ m := new(SessionReq) ++ if err := x.ServerStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. ++// It's only intended for direct use with grpc.RegisterService, ++// and not to be introspected or modified (even as a copy) ++var S2AService_ServiceDesc = grpc.ServiceDesc{ ++ ServiceName: "s2a.proto.S2AService", ++ HandlerType: (*S2AServiceServer)(nil), ++ Methods: []grpc.MethodDesc{}, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "SetUpSession", ++ Handler: _S2AService_SetUpSession_Handler, ++ ServerStreams: true, ++ ClientStreams: true, ++ }, ++ }, ++ Metadata: "internal/proto/s2a/s2a.proto", ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +new file mode 100644 +index 00000000000..c84bed97748 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +@@ -0,0 +1,367 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. 
++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/common/common.proto ++ ++package common_go_proto ++ ++import ( ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using ++// S2A. ++type Ciphersuite int32 ++ ++const ( ++ Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 ++ Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 ++ Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 ++) ++ ++// Enum value maps for Ciphersuite. ++var ( ++ Ciphersuite_name = map[int32]string{ ++ 0: "CIPHERSUITE_UNSPECIFIED", ++ 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", ++ 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", ++ 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", ++ 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", ++ 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", ++ 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", ++ } ++ Ciphersuite_value = map[string]int32{ ++ "CIPHERSUITE_UNSPECIFIED": 0, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, ++ "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, ++ "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, ++ "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, ++ "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, ++ } ++) ++ ++func (x Ciphersuite) Enum() *Ciphersuite { ++ p := new(Ciphersuite) ++ *p = x ++ return p ++} ++ ++func (x Ciphersuite) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() ++} ++ ++func (Ciphersuite) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[0] ++} ++ ++func (x Ciphersuite) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use Ciphersuite.Descriptor instead. ++func (Ciphersuite) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} ++} ++ ++// The TLS versions supported by S2A's handshaker module. ++type TLSVersion int32 ++ ++const ( ++ TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 ++ TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 ++ TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 ++ TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 ++ TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 ++) ++ ++// Enum value maps for TLSVersion. 
++var ( ++ TLSVersion_name = map[int32]string{ ++ 0: "TLS_VERSION_UNSPECIFIED", ++ 1: "TLS_VERSION_1_0", ++ 2: "TLS_VERSION_1_1", ++ 3: "TLS_VERSION_1_2", ++ 4: "TLS_VERSION_1_3", ++ } ++ TLSVersion_value = map[string]int32{ ++ "TLS_VERSION_UNSPECIFIED": 0, ++ "TLS_VERSION_1_0": 1, ++ "TLS_VERSION_1_1": 2, ++ "TLS_VERSION_1_2": 3, ++ "TLS_VERSION_1_3": 4, ++ } ++) ++ ++func (x TLSVersion) Enum() *TLSVersion { ++ p := new(TLSVersion) ++ *p = x ++ return p ++} ++ ++func (x TLSVersion) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() ++} ++ ++func (TLSVersion) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[1] ++} ++ ++func (x TLSVersion) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use TLSVersion.Descriptor instead. ++func (TLSVersion) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} ++} ++ ++// The side in the TLS connection. ++type ConnectionSide int32 ++ ++const ( ++ ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 ++ ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 ++ ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 ++) ++ ++// Enum value maps for ConnectionSide. ++var ( ++ ConnectionSide_name = map[int32]string{ ++ 0: "CONNECTION_SIDE_UNSPECIFIED", ++ 1: "CONNECTION_SIDE_CLIENT", ++ 2: "CONNECTION_SIDE_SERVER", ++ } ++ ConnectionSide_value = map[string]int32{ ++ "CONNECTION_SIDE_UNSPECIFIED": 0, ++ "CONNECTION_SIDE_CLIENT": 1, ++ "CONNECTION_SIDE_SERVER": 2, ++ } ++) ++ ++func (x ConnectionSide) Enum() *ConnectionSide { ++ p := new(ConnectionSide) ++ *p = x ++ return p ++} ++ ++func (x ConnectionSide) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() ++} ++ ++func (ConnectionSide) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[2] ++} ++ ++func (x ConnectionSide) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ConnectionSide.Descriptor instead. ++func (ConnectionSide) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} ++} ++ ++// The ALPN protocols that the application can negotiate during a TLS handshake. ++type AlpnProtocol int32 ++ ++const ( ++ AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 ++ AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 ++ AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 ++ AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 ++) ++ ++// Enum value maps for AlpnProtocol. 
++var ( ++ AlpnProtocol_name = map[int32]string{ ++ 0: "ALPN_PROTOCOL_UNSPECIFIED", ++ 1: "ALPN_PROTOCOL_GRPC", ++ 2: "ALPN_PROTOCOL_HTTP2", ++ 3: "ALPN_PROTOCOL_HTTP1_1", ++ } ++ AlpnProtocol_value = map[string]int32{ ++ "ALPN_PROTOCOL_UNSPECIFIED": 0, ++ "ALPN_PROTOCOL_GRPC": 1, ++ "ALPN_PROTOCOL_HTTP2": 2, ++ "ALPN_PROTOCOL_HTTP1_1": 3, ++ } ++) ++ ++func (x AlpnProtocol) Enum() *AlpnProtocol { ++ p := new(AlpnProtocol) ++ *p = x ++ return p ++} ++ ++func (x AlpnProtocol) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() ++} ++ ++func (AlpnProtocol) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_common_common_proto_enumTypes[3] ++} ++ ++func (x AlpnProtocol) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use AlpnProtocol.Descriptor instead. ++func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} ++} ++ ++var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ ++ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, ++ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, ++ 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, ++ 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, ++ 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, ++ 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, ++ 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, ++ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, ++ 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, ++ 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, ++ 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, ++ 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, ++ 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, ++ 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, ++ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, ++ 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, ++ 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, ++ 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, ++ 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, ++ 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 
0x10, 0x05, 0x12, 0x37, 0x0a, ++ 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, ++ 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, ++ 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, ++ 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, ++ 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, ++ 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, ++ 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, ++ 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, ++ 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, ++ 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, ++ 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, ++ 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, ++ 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, ++ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, ++ 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, ++ 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, ++ 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, ++ 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, ++ 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, ++ 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, ++ 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, ++ 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, ++ 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, ++ 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, ++ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, ++ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, ++ 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc ++) ++ ++func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_common_common_proto_rawDescData ++} ++ 
++var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) ++var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ ++ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite ++ (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion ++ (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide ++ (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol ++} ++var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ ++ 0, // [0:0] is the sub-list for method output_type ++ 0, // [0:0] is the sub-list for method input_type ++ 0, // [0:0] is the sub-list for extension type_name ++ 0, // [0:0] is the sub-list for extension extendee ++ 0, // [0:0] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_common_common_proto_init() } ++func file_internal_proto_v2_common_common_proto_init() { ++ if File_internal_proto_v2_common_common_proto != nil { ++ return ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, ++ NumEnums: 4, ++ NumMessages: 0, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_v2_common_common_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, ++ EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, ++ }.Build() ++ File_internal_proto_v2_common_common_proto = out.File ++ file_internal_proto_v2_common_common_proto_rawDesc = nil ++ file_internal_proto_v2_common_common_proto_goTypes = nil ++ file_internal_proto_v2_common_common_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +new file mode 100644 +index 00000000000..b7fd871c7a7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +@@ -0,0 +1,248 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/s2a_context/s2a_context.proto ++ ++package s2a_context_go_proto ++ ++import ( ++ common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. 
++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type S2AContext struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The SPIFFE ID from the peer leaf certificate, if present. ++ // ++ // This field is only populated if the leaf certificate is a valid SPIFFE ++ // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid ++ // SPIFFE ID. ++ LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` ++ // The URIs that are present in the SubjectAltName extension of the peer leaf ++ // certificate. ++ // ++ // Note that the extracted URIs are not validated and may not be properly ++ // formatted. ++ LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` ++ // The DNSNames that are present in the SubjectAltName extension of the peer ++ // leaf certificate. ++ LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` ++ // The (ordered) list of fingerprints in the certificate chain used to verify ++ // the given leaf certificate. The order MUST be from leaf certificate ++ // fingerprint to root certificate fingerprint. ++ // ++ // A fingerprint is the base-64 encoding of the SHA256 hash of the ++ // DER-encoding of a certificate. The list MAY be populated even if the peer ++ // certificate chain was NOT validated successfully. ++ PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` ++ // The local identity used during session setup. ++ LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The SHA256 hash of the DER-encoding of the local leaf certificate used in ++ // the handshake. ++ LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` ++} ++ ++func (x *S2AContext) Reset() { ++ *x = S2AContext{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *S2AContext) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*S2AContext) ProtoMessage() {} ++ ++func (x *S2AContext) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
++func (*S2AContext) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *S2AContext) GetLeafCertSpiffeId() string { ++ if x != nil { ++ return x.LeafCertSpiffeId ++ } ++ return "" ++} ++ ++func (x *S2AContext) GetLeafCertUris() []string { ++ if x != nil { ++ return x.LeafCertUris ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLeafCertDnsnames() []string { ++ if x != nil { ++ return x.LeafCertDnsnames ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { ++ if x != nil { ++ return x.PeerCertificateChainFingerprints ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { ++ if x != nil { ++ return x.LocalLeafCertFingerprint ++ } ++ return nil ++} ++ ++var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ ++ 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, ++ 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, ++ 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, ++ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, ++ 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, ++ 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, ++ 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, ++ 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, ++ 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, ++ 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, ++ 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, ++ 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, ++ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, ++ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, ++ 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, ++ 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, ++ 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, ++ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, ++ 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 
0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, ++ 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, ++ 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, ++ 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, ++ 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, ++ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, ++ 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, ++ 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, ++ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc ++) ++ ++func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData ++} ++ ++var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) ++var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ ++ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext ++ (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity ++} ++var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ ++ 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity ++ 1, // [1:1] is the sub-list for method output_type ++ 1, // [1:1] is the sub-list for method input_type ++ 1, // [1:1] is the sub-list for extension type_name ++ 1, // [1:1] is the sub-list for extension extendee ++ 0, // [0:1] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } ++func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { ++ if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*S2AContext); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, ++ NumEnums: 0, ++ NumMessages: 1, ++ NumExtensions: 0, ++ NumServices: 0, ++ }, ++ GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, ++ MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, ++ }.Build() ++ File_internal_proto_v2_s2a_context_s2a_context_proto = out.File ++ file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil ++ file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil ++ file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil 
++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +new file mode 100644 +index 00000000000..e843450c7ed +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +@@ -0,0 +1,2494 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// versions: ++// protoc-gen-go v1.30.0 ++// protoc v3.21.12 ++// source: internal/proto/v2/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" ++ common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ++ s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" ++ protoreflect "google.golang.org/protobuf/reflect/protoreflect" ++ protoimpl "google.golang.org/protobuf/runtime/protoimpl" ++ reflect "reflect" ++ sync "sync" ++) ++ ++const ( ++ // Verify that this generated code is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) ++ // Verify that runtime/protoimpl is sufficiently up-to-date. ++ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ++) ++ ++type SignatureAlgorithm int32 ++ ++const ( ++ SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 ++ // RSA Public-Key Cryptography Standards #1. ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 ++ // ECDSA. ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 ++ SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 ++ // RSA Probabilistic Signature Scheme. ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 ++ SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 ++ // ED25519. ++ SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 ++) ++ ++// Enum value maps for SignatureAlgorithm. 
++var ( ++ SignatureAlgorithm_name = map[int32]string{ ++ 0: "S2A_SSL_SIGN_UNSPECIFIED", ++ 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", ++ 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", ++ 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", ++ 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", ++ 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", ++ 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", ++ 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", ++ 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", ++ 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", ++ 10: "S2A_SSL_SIGN_ED25519", ++ } ++ SignatureAlgorithm_value = map[string]int32{ ++ "S2A_SSL_SIGN_UNSPECIFIED": 0, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, ++ "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, ++ "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, ++ "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, ++ "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, ++ "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, ++ "S2A_SSL_SIGN_ED25519": 10, ++ } ++) ++ ++func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { ++ p := new(SignatureAlgorithm) ++ *p = x ++ return p ++} ++ ++func (x SignatureAlgorithm) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() ++} ++ ++func (SignatureAlgorithm) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] ++} ++ ++func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use SignatureAlgorithm.Descriptor instead. ++func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 ++ ++const ( ++ GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 ++ GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 ++ GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 ++) ++ ++// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
++var ( ++ GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "DONT_REQUEST_CLIENT_CERTIFICATE", ++ 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", ++ 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", ++ 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", ++ 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", ++ } ++ GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "DONT_REQUEST_CLIENT_CERTIFICATE": 1, ++ "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, ++ "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, ++ "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, ++ "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, ++ } ++) ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { ++ p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) ++ *p = x ++ return p ++} ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() ++} ++ ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] ++} ++ ++func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. ++func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} ++} ++ ++type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 ++ ++const ( ++ OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 ++ // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of ++ // the TLS handshake must be signed to prove possession of the private key. ++ // ++ // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. ++ OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 ++ // When performing a TLS 1.2 handshake using an RSA algorithm, the key ++ // exchange algorithm involves the client generating a premaster secret, ++ // encrypting it using the server's public key, and sending this encrypted ++ // blob to the server in a ClientKeyExchange message. ++ // ++ // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. ++ OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 ++) ++ ++// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
++var ( ++ OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SIGN", ++ 2: "DECRYPT", ++ } ++ OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SIGN": 1, ++ "DECRYPT": 2, ++ } ++) ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { ++ p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) ++ *p = x ++ return p ++} ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() ++} ++ ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] ++} ++ ++func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. ++func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} ++} ++ ++type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 ++ ++const ( ++ OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 ++ OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 ++ OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 ++) ++ ++// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. ++var ( ++ OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "ENCRYPT", ++ 2: "DECRYPT", ++ } ++ OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "ENCRYPT": 1, ++ "DECRYPT": 2, ++ } ++) ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { ++ p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) ++ *p = x ++ return p ++} ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() ++} ++ ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] ++} ++ ++func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. ++func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} ++} ++ ++type ValidatePeerCertificateChainReq_VerificationMode int32 ++ ++const ( ++ // The default verification mode supported by S2A. 
++ ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 ++ // The SPIFFE verification mode selects the set of trusted certificates to ++ // use for path building based on the SPIFFE trust domain in the peer's leaf ++ // certificate. ++ ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 ++ // The connect-to-Google verification mode uses the trust bundle for ++ // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ++ ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 ++) ++ ++// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. ++var ( ++ ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SPIFFE", ++ 2: "CONNECT_TO_GOOGLE", ++ } ++ ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SPIFFE": 1, ++ "CONNECT_TO_GOOGLE": 2, ++ } ++) ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { ++ p := new(ValidatePeerCertificateChainReq_VerificationMode) ++ *p = x ++ return p ++} ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() ++} ++ ++func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] ++} ++ ++func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. ++func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} ++} ++ ++type ValidatePeerCertificateChainResp_ValidationResult int32 ++ ++const ( ++ ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 ++ ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 ++ ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 ++) ++ ++// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
++var ( ++ ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ ++ 0: "UNSPECIFIED", ++ 1: "SUCCESS", ++ 2: "FAILURE", ++ } ++ ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ ++ "UNSPECIFIED": 0, ++ "SUCCESS": 1, ++ "FAILURE": 2, ++ } ++) ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { ++ p := new(ValidatePeerCertificateChainResp_ValidationResult) ++ *p = x ++ return p ++} ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { ++ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) ++} ++ ++func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { ++ return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() ++} ++ ++func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { ++ return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] ++} ++ ++func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { ++ return protoreflect.EnumNumber(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. ++func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} ++} ++ ++type AlpnPolicy struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // If true, the application MUST perform ALPN negotiation. ++ EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` ++ // The ordered list of ALPN protocols that specify how the application SHOULD ++ // negotiate ALPN during the TLS handshake. ++ // ++ // The application MAY ignore any ALPN protocols in this list that are not ++ // supported by the application. ++ AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` ++} ++ ++func (x *AlpnPolicy) Reset() { ++ *x = AlpnPolicy{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AlpnPolicy) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AlpnPolicy) ProtoMessage() {} ++ ++func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. 
++func (*AlpnPolicy) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} ++} ++ ++func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { ++ if x != nil { ++ return x.EnableAlpnNegotiation ++ } ++ return false ++} ++ ++func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { ++ if x != nil { ++ return x.AlpnProtocols ++ } ++ return nil ++} ++ ++type AuthenticationMechanism struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Applications may specify an identity associated to an authentication ++ // mechanism. Otherwise, S2A assumes that the authentication mechanism is ++ // associated with the default identity. If the default identity cannot be ++ // determined, the request is rejected. ++ Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` ++ // Types that are assignable to MechanismOneof: ++ // ++ // *AuthenticationMechanism_Token ++ MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` ++} ++ ++func (x *AuthenticationMechanism) Reset() { ++ *x = AuthenticationMechanism{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *AuthenticationMechanism) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*AuthenticationMechanism) ProtoMessage() {} ++ ++func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. ++func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} ++} ++ ++func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { ++ if x != nil { ++ return x.Identity ++ } ++ return nil ++} ++ ++func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { ++ if m != nil { ++ return m.MechanismOneof ++ } ++ return nil ++} ++ ++func (x *AuthenticationMechanism) GetToken() string { ++ if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { ++ return x.Token ++ } ++ return "" ++} ++ ++type isAuthenticationMechanism_MechanismOneof interface { ++ isAuthenticationMechanism_MechanismOneof() ++} ++ ++type AuthenticationMechanism_Token struct { ++ // A token that the application uses to authenticate itself to S2A. ++ Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` ++} ++ ++func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} ++ ++type Status struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The status code that is specific to the application and the implementation ++ // of S2A, e.g., gRPC status code. ++ Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` ++ // The status details. 
++ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` ++} ++ ++func (x *Status) Reset() { ++ *x = Status{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *Status) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*Status) ProtoMessage() {} ++ ++func (x *Status) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use Status.ProtoReflect.Descriptor instead. ++func (*Status) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} ++} ++ ++func (x *Status) GetCode() uint32 { ++ if x != nil { ++ return x.Code ++ } ++ return 0 ++} ++ ++func (x *Status) GetDetails() string { ++ if x != nil { ++ return x.Details ++ } ++ return "" ++} ++ ++type GetTlsConfigurationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The role of the application in the TLS connection. ++ ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` ++ // The server name indication (SNI) extension, which MAY be populated when a ++ // server is offloading to S2A. The SNI is used to determine the server ++ // identity if the local identity in the request is empty. ++ Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` ++} ++ ++func (x *GetTlsConfigurationReq) Reset() { ++ *x = GetTlsConfigurationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationReq) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. 
++func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} ++} ++ ++func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { ++ if x != nil { ++ return x.ConnectionSide ++ } ++ return common_go_proto.ConnectionSide(0) ++} ++ ++func (x *GetTlsConfigurationReq) GetSni() string { ++ if x != nil { ++ return x.Sni ++ } ++ return "" ++} ++ ++type GetTlsConfigurationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Types that are assignable to TlsConfiguration: ++ // ++ // *GetTlsConfigurationResp_ClientTlsConfiguration_ ++ // *GetTlsConfigurationResp_ServerTlsConfiguration_ ++ TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` ++} ++ ++func (x *GetTlsConfigurationResp) Reset() { ++ *x = GetTlsConfigurationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. ++func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} ++} ++ ++func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { ++ if m != nil { ++ return m.TlsConfiguration ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { ++ if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { ++ return x.ClientTlsConfiguration ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { ++ if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { ++ return x.ServerTlsConfiguration ++ } ++ return nil ++} ++ ++type isGetTlsConfigurationResp_TlsConfiguration interface { ++ isGetTlsConfigurationResp_TlsConfiguration() ++} ++ ++type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { ++ ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` ++} ++ ++type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { ++ ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` ++} ++ ++func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { ++} ++ ++func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { ++} ++ ++type OffloadPrivateKeyOperationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields 
protoimpl.UnknownFields ++ ++ // The operation the private key is used for. ++ Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` ++ // The signature algorithm to be used for signing operations. ++ SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` ++ // The input bytes to be signed or decrypted. ++ // ++ // Types that are assignable to InBytes: ++ // ++ // *OffloadPrivateKeyOperationReq_RawBytes ++ // *OffloadPrivateKeyOperationReq_Sha256Digest ++ // *OffloadPrivateKeyOperationReq_Sha384Digest ++ // *OffloadPrivateKeyOperationReq_Sha512Digest ++ InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` ++} ++ ++func (x *OffloadPrivateKeyOperationReq) Reset() { ++ *x = OffloadPrivateKeyOperationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadPrivateKeyOperationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} ++ ++func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
++func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { ++ if x != nil { ++ return x.Operation ++ } ++ return OffloadPrivateKeyOperationReq_UNSPECIFIED ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { ++ if x != nil { ++ return x.SignatureAlgorithm ++ } ++ return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED ++} ++ ++func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { ++ if m != nil { ++ return m.InBytes ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { ++ return x.RawBytes ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { ++ return x.Sha256Digest ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { ++ return x.Sha384Digest ++ } ++ return nil ++} ++ ++func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { ++ if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { ++ return x.Sha512Digest ++ } ++ return nil ++} ++ ++type isOffloadPrivateKeyOperationReq_InBytes interface { ++ isOffloadPrivateKeyOperationReq_InBytes() ++} ++ ++type OffloadPrivateKeyOperationReq_RawBytes struct { ++ // Raw bytes to be hashed and signed, or decrypted. ++ RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha256Digest struct { ++ // A SHA256 hash to be signed. Must be 32 bytes. ++ Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha384Digest struct { ++ // A SHA384 hash to be signed. Must be 48 bytes. ++ Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` ++} ++ ++type OffloadPrivateKeyOperationReq_Sha512Digest struct { ++ // A SHA512 hash to be signed. Must be 64 bytes. ++ Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` ++} ++ ++func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} ++ ++type OffloadPrivateKeyOperationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The signed or decrypted output bytes. 
++ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` ++} ++ ++func (x *OffloadPrivateKeyOperationResp) Reset() { ++ *x = OffloadPrivateKeyOperationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadPrivateKeyOperationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} ++ ++func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. ++func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} ++} ++ ++func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { ++ if x != nil { ++ return x.OutBytes ++ } ++ return nil ++} ++ ++type OffloadResumptionKeyOperationReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The operation the resumption key is used for. ++ Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` ++ // The bytes to be encrypted or decrypted. ++ InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` ++} ++ ++func (x *OffloadResumptionKeyOperationReq) Reset() { ++ *x = OffloadResumptionKeyOperationReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadResumptionKeyOperationReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} ++ ++func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. ++func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} ++} ++ ++func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { ++ if x != nil { ++ return x.Operation ++ } ++ return OffloadResumptionKeyOperationReq_UNSPECIFIED ++} ++ ++func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { ++ if x != nil { ++ return x.InBytes ++ } ++ return nil ++} ++ ++type OffloadResumptionKeyOperationResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The encrypted or decrypted bytes. 
++ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` ++} ++ ++func (x *OffloadResumptionKeyOperationResp) Reset() { ++ *x = OffloadResumptionKeyOperationResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *OffloadResumptionKeyOperationResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} ++ ++func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. ++func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} ++} ++ ++func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { ++ if x != nil { ++ return x.OutBytes ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The verification mode that S2A MUST use to validate the peer certificate ++ // chain. ++ Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` ++ // Types that are assignable to PeerOneof: ++ // ++ // *ValidatePeerCertificateChainReq_ClientPeer_ ++ // *ValidatePeerCertificateChainReq_ServerPeer_ ++ PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` ++} ++ ++func (x *ValidatePeerCertificateChainReq) Reset() { ++ *x = ValidatePeerCertificateChainReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
++func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { ++ if x != nil { ++ return x.Mode ++ } ++ return ValidatePeerCertificateChainReq_UNSPECIFIED ++} ++ ++func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { ++ if m != nil { ++ return m.PeerOneof ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { ++ if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { ++ return x.ClientPeer ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { ++ if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { ++ return x.ServerPeer ++ } ++ return nil ++} ++ ++type isValidatePeerCertificateChainReq_PeerOneof interface { ++ isValidatePeerCertificateChainReq_PeerOneof() ++} ++ ++type ValidatePeerCertificateChainReq_ClientPeer_ struct { ++ ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` ++} ++ ++type ValidatePeerCertificateChainReq_ServerPeer_ struct { ++ ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` ++} ++ ++func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} ++ ++func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} ++ ++type ValidatePeerCertificateChainResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The result of validating the peer certificate chain. ++ ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` ++ // The validation details. This field is only populated when the validation ++ // result is NOT SUCCESS. ++ ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` ++ // The S2A context contains information from the peer certificate chain. ++ // ++ // The S2A context MAY be populated even if validation of the peer certificate ++ // chain fails. 
++ Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainResp) Reset() { ++ *x = ValidatePeerCertificateChainResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainResp) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. ++func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { ++ if x != nil { ++ return x.ValidationResult ++ } ++ return ValidatePeerCertificateChainResp_UNSPECIFIED ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { ++ if x != nil { ++ return x.ValidationDetails ++ } ++ return "" ++} ++ ++func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { ++ if x != nil { ++ return x.Context ++ } ++ return nil ++} ++ ++type SessionReq struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The identity corresponding to the TLS configurations that MUST be used for ++ // the TLS handshake. ++ // ++ // If a managed identity already exists, the local identity and authentication ++ // mechanisms are ignored. If a managed identity doesn't exist and the local ++ // identity is not populated, S2A will try to deduce the managed identity to ++ // use from the SNI extension. If that also fails, S2A uses the default ++ // identity (if one exists). ++ LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` ++ // The authentication mechanisms that the application wishes to use to ++ // authenticate to S2A, ordered by preference. S2A will always use the first ++ // authentication mechanism that matches the managed identity. 
++ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` ++ // Types that are assignable to ReqOneof: ++ // ++ // *SessionReq_GetTlsConfigurationReq ++ // *SessionReq_OffloadPrivateKeyOperationReq ++ // *SessionReq_OffloadResumptionKeyOperationReq ++ // *SessionReq_ValidatePeerCertificateChainReq ++ ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` ++} ++ ++func (x *SessionReq) Reset() { ++ *x = SessionReq{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionReq) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionReq) ProtoMessage() {} ++ ++func (x *SessionReq) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. ++func (*SessionReq) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} ++} ++ ++func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { ++ if x != nil { ++ return x.LocalIdentity ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { ++ if x != nil { ++ return x.AuthenticationMechanisms ++ } ++ return nil ++} ++ ++func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { ++ if m != nil { ++ return m.ReqOneof ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { ++ return x.GetTlsConfigurationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { ++ return x.OffloadPrivateKeyOperationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { ++ return x.OffloadResumptionKeyOperationReq ++ } ++ return nil ++} ++ ++func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { ++ if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { ++ return x.ValidatePeerCertificateChainReq ++ } ++ return nil ++} ++ ++type isSessionReq_ReqOneof interface { ++ isSessionReq_ReqOneof() ++} ++ ++type SessionReq_GetTlsConfigurationReq struct { ++ // Requests the certificate chain and TLS configuration corresponding to the ++ // local identity, which the application MUST use to negotiate the TLS ++ // handshake. ++ GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` ++} ++ ++type SessionReq_OffloadPrivateKeyOperationReq struct { ++ // Signs or decrypts the input bytes using a private key corresponding to ++ // the local identity in the request. ++ // ++ // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the ++ // S2Av2 by a server during a TLS 1.2 handshake. 
++ OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` ++} ++ ++type SessionReq_OffloadResumptionKeyOperationReq struct { ++ // Encrypts or decrypts the input bytes using a resumption key corresponding ++ // to the local identity in the request. ++ OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` ++} ++ ++type SessionReq_ValidatePeerCertificateChainReq struct { ++ // Verifies the peer's certificate chain using ++ // (a) trust bundles corresponding to the local identity in the request, and ++ // (b) the verification mode in the request. ++ ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` ++} ++ ++func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} ++ ++func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} ++ ++type SessionResp struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // Status of the session response. ++ // ++ // The status field is populated so that if an error occurs when making an ++ // individual request, then communication with the S2A may continue. If an ++ // error is returned directly (e.g. at the gRPC layer), then it may result ++ // that the bidirectional stream being closed. ++ Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` ++ // Types that are assignable to RespOneof: ++ // ++ // *SessionResp_GetTlsConfigurationResp ++ // *SessionResp_OffloadPrivateKeyOperationResp ++ // *SessionResp_OffloadResumptionKeyOperationResp ++ // *SessionResp_ValidatePeerCertificateChainResp ++ RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` ++} ++ ++func (x *SessionResp) Reset() { ++ *x = SessionResp{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *SessionResp) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*SessionResp) ProtoMessage() {} ++ ++func (x *SessionResp) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
++func (*SessionResp) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} ++} ++ ++func (x *SessionResp) GetStatus() *Status { ++ if x != nil { ++ return x.Status ++ } ++ return nil ++} ++ ++func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { ++ if m != nil { ++ return m.RespOneof ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { ++ return x.GetTlsConfigurationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { ++ return x.OffloadPrivateKeyOperationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { ++ return x.OffloadResumptionKeyOperationResp ++ } ++ return nil ++} ++ ++func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { ++ if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { ++ return x.ValidatePeerCertificateChainResp ++ } ++ return nil ++} ++ ++type isSessionResp_RespOneof interface { ++ isSessionResp_RespOneof() ++} ++ ++type SessionResp_GetTlsConfigurationResp struct { ++ // Contains the certificate chain and TLS configurations corresponding to ++ // the local identity. ++ GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` ++} ++ ++type SessionResp_OffloadPrivateKeyOperationResp struct { ++ // Contains the signed or encrypted output bytes using the private key ++ // corresponding to the local identity. ++ OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` ++} ++ ++type SessionResp_OffloadResumptionKeyOperationResp struct { ++ // Contains the encrypted or decrypted output bytes using the resumption key ++ // corresponding to the local identity. ++ OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` ++} ++ ++type SessionResp_ValidatePeerCertificateChainResp struct { ++ // Contains the validation result, peer identity and fingerprints of peer ++ // certificates. ++ ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` ++} ++ ++func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} ++ ++func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} ++ ++// Next ID: 8 ++type GetTlsConfigurationResp_ClientTlsConfiguration struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain that the client MUST use for the TLS handshake. ++ // It's a list of PEM-encoded certificates, ordered from leaf to root, ++ // excluding the root. 
++ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The minimum TLS version number that the client MUST use for the TLS ++ // handshake. If this field is not provided, the client MUST use the default ++ // minimum version of the client's TLS library. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` ++ // The maximum TLS version number that the client MUST use for the TLS ++ // handshake. If this field is not provided, the client MUST use the default ++ // maximum version of the client's TLS library. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` ++ // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to ++ // negotiate in the TLS handshake. ++ Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` ++ // The policy that dictates how the client negotiates ALPN during the TLS ++ // handshake. ++ AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { ++ *x = GetTlsConfigurationResp_ClientTlsConfiguration{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
++func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuites ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { ++ if x != nil { ++ return x.AlpnPolicy ++ } ++ return nil ++} ++ ++// Next ID: 12 ++type GetTlsConfigurationResp_ServerTlsConfiguration struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain that the server MUST use for the TLS handshake. ++ // It's a list of PEM-encoded certificates, ordered from leaf to root, ++ // excluding the root. ++ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The minimum TLS version number that the server MUST use for the TLS ++ // handshake. If this field is not provided, the server MUST use the default ++ // minimum version of the server's TLS library. ++ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` ++ // The maximum TLS version number that the server MUST use for the TLS ++ // handshake. If this field is not provided, the server MUST use the default ++ // maximum version of the server's TLS library. ++ MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` ++ // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to ++ // negotiate in the TLS handshake. ++ Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` ++ // Whether to enable TLS resumption. ++ TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` ++ // Whether the server MUST request a client certificate (i.e. to negotiate ++ // TLS vs. mTLS). ++ RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` ++ // Returns the maximum number of extra bytes that ++ // |OffloadResumptionKeyOperation| can add to the number of unencrypted ++ // bytes to form the encrypted bytes. 
++ MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` ++ // The policy that dictates how the server negotiates ALPN during the TLS ++ // handshake. ++ AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { ++ *x = GetTlsConfigurationResp_ServerTlsConfiguration{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. ++func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MinTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { ++ if x != nil { ++ return x.MaxTlsVersion ++ } ++ return common_go_proto.TLSVersion(0) ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { ++ if x != nil { ++ return x.Ciphersuites ++ } ++ return nil ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { ++ if x != nil { ++ return x.TlsResumptionEnabled ++ } ++ return false ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { ++ if x != nil { ++ return x.RequestClientCertificate ++ } ++ return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { ++ if x != nil { ++ return x.MaxOverheadOfTicketAead ++ } ++ return 0 ++} ++ ++func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { ++ if x != nil { ++ return x.AlpnPolicy ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq_ClientPeer struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain to be verified. The chain MUST be a list of ++ // DER-encoded certificates, ordered from leaf to root, excluding the root. 
++ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { ++ *x = ValidatePeerCertificateChainReq_ClientPeer{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. ++func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} ++} ++ ++func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++type ValidatePeerCertificateChainReq_ServerPeer struct { ++ state protoimpl.MessageState ++ sizeCache protoimpl.SizeCache ++ unknownFields protoimpl.UnknownFields ++ ++ // The certificate chain to be verified. The chain MUST be a list of ++ // DER-encoded certificates, ordered from leaf to root, excluding the root. ++ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` ++ // The expected hostname of the server. ++ ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` ++ // The UnrestrictedClientPolicy specified by the user. ++ SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { ++ *x = ValidatePeerCertificateChainReq_ServerPeer{} ++ if protoimpl.UnsafeEnabled { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ ms.StoreMessageInfo(mi) ++ } ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { ++ return protoimpl.X.MessageStringOf(x) ++} ++ ++func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { ++ mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] ++ if protoimpl.UnsafeEnabled && x != nil { ++ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ++ if ms.LoadMessageInfo() == nil { ++ ms.StoreMessageInfo(mi) ++ } ++ return ms ++ } ++ return mi.MessageOf(x) ++} ++ ++// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
++func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { ++ if x != nil { ++ return x.CertificateChain ++ } ++ return nil ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { ++ if x != nil { ++ return x.ServerHostname ++ } ++ return "" ++} ++ ++func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { ++ if x != nil { ++ return x.SerializedUnrestrictedClientPolicy ++ } ++ return nil ++} ++ ++var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor ++ ++var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ ++ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, ++ 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, ++ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, ++ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, ++ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, ++ 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, ++ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, ++ 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, ++ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, ++ 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, ++ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, ++ 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, ++ 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, ++ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, ++ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, ++ 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, ++ 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, ++ 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, ++ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 
0x64, 0x65, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, ++ 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, ++ 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, ++ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, ++ 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, ++ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, ++ 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, ++ 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, ++ 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, ++ 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, ++ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, ++ 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, ++ 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, ++ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, ++ 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, ++ 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, ++ 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, ++ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, ++ 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, ++ 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, ++ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, ++ 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, ++ 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, ++ 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 
0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, ++ 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, ++ 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, ++ 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, ++ 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, ++ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, ++ 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, ++ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, ++ 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, ++ 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, ++ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, ++ 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, ++ 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, ++ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, ++ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, ++ 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, ++ 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, ++ 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, ++ 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, ++ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, ++ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, ++ 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, ++ 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, ++ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, ++ 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, ++ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, ++ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, ++ 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, ++ 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, ++ 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, ++ 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, ++ 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, ++ 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, ++ 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, ++ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, ++ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, ++ 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, ++ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, ++ 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, ++ 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, ++ 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, ++ 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, ++ 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, ++ 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, ++ 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, ++ 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, ++ 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, ++ 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, ++ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, ++ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, ++ 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, ++ 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, ++ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, ++ 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, ++ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, ++ 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, ++ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, ++ 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, ++ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, ++ 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, ++ 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, ++ 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, ++ 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, ++ 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, ++ 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, ++ 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, ++ 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, ++ 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, ++ 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, ++ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, ++ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, ++ 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, ++ 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, ++ 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, ++ 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, ++ 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, ++ 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, ++ 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, ++ 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, ++ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, ++ 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, ++ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4b, 0x65, 0x79, 0x4f, 
0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, ++ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, ++ 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, ++ 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, ++ 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, ++ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, ++ 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, ++ 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, ++ 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, ++ 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, ++ 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, ++ 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, ++ 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, ++ 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, ++ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, ++ 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, ++ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, ++ 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, ++ 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, ++ 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, ++ 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, ++ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, ++ 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, ++ 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, ++ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, ++ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, ++ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, ++ 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, ++ 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, ++ 0x63, 0x61, 
0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, ++ 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, ++ 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, ++ 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, ++ 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, ++ 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, ++ 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, ++ 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, ++ 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, ++ 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, ++ 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, ++ 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, ++ 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, ++ 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, ++ 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, ++ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, ++ 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, ++ 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, ++ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, ++ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, ++ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, ++ 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, ++ 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, ++ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, ++ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, ++ 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, ++ 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, ++ 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, ++ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, ++ 
0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, ++ 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, ++ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, ++ 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, ++ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, ++ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, ++ 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, ++ 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, ++ 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, ++ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, ++ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, ++ 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, ++ 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, ++ 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, ++ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, ++ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, ++ 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, ++ 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, ++ 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, ++ 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, ++ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, ++ 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, ++ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, ++ 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, ++ 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, ++ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 
0x72, ++ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, ++ 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, ++ 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, ++ 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, ++ 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, ++ 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, ++ 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, ++ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, ++ 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, ++ 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, ++ 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, ++ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, ++ 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, ++ 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, ++ 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, ++ 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, ++ 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, ++ 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, ++ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, ++ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, ++ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, ++ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, ++ 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, ++ 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, ++ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, ++ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, ++ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, ++ 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, ++ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, ++ 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x65, 0x43, ++ 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, ++ 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, ++ 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, ++ 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, ++ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, ++ 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, ++ 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, ++ 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, ++ 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, ++ 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, ++ 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, ++ 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, ++ 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, ++ 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, ++ 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, ++ 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, ++ 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, ++ 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, ++ 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, ++ 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, ++ 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, ++ 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, ++ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, ++ 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, ++ 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, ++ 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, ++ 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, ++ 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, ++ 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, ++ 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, ++ 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, ++ 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, ++ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, ++ 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, ++ 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 
0x34, 0x67, 0x69, 0x74, 0x68, ++ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, ++ 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, ++ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ++} ++ ++var ( ++ file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once ++ file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc ++) ++ ++func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { ++ file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { ++ file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) ++ }) ++ return file_internal_proto_v2_s2a_s2a_proto_rawDescData ++} ++ ++var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) ++var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) ++var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ ++ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm ++ (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate ++ (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation ++ (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation ++ (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode ++ (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult ++ (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy ++ (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism ++ (*Status)(nil), // 8: s2a.proto.v2.Status ++ (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq ++ (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp ++ (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq ++ (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp ++ (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq ++ (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp ++ (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq ++ (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp ++ (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq ++ (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp ++ (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration ++ (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration ++ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer ++ (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer ++ (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol ++ (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity ++ 
(common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide ++ (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext ++ (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion ++ (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite ++} ++var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ ++ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol ++ 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity ++ 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide ++ 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration ++ 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration ++ 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation ++ 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm ++ 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation ++ 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode ++ 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer ++ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer ++ 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult ++ 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext ++ 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity ++ 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism ++ 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq ++ 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq ++ 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq ++ 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq ++ 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status ++ 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp ++ 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp ++ 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp ++ 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp ++ 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> 
s2a.proto.v2.TLSVersion ++ 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite ++ 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy ++ 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 27, // 29: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion ++ 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite ++ 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate ++ 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy ++ 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq ++ 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp ++ 34, // [34:35] is the sub-list for method output_type ++ 33, // [33:34] is the sub-list for method input_type ++ 33, // [33:33] is the sub-list for extension type_name ++ 33, // [33:33] is the sub-list for extension extendee ++ 0, // [0:33] is the sub-list for field type_name ++} ++ ++func init() { file_internal_proto_v2_s2a_s2a_proto_init() } ++func file_internal_proto_v2_s2a_s2a_proto_init() { ++ if File_internal_proto_v2_s2a_s2a_proto != nil { ++ return ++ } ++ if !protoimpl.UnsafeEnabled { ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AlpnPolicy); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*AuthenticationMechanism); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*Status); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadPrivateKeyOperationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadPrivateKeyOperationResp); i { ++ case 0: ++ return 
&v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadResumptionKeyOperationReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*OffloadResumptionKeyOperationResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionReq); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*SessionResp); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { ++ switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { ++ case 0: ++ return &v.state ++ case 1: ++ return &v.sizeCache ++ case 2: ++ return &v.unknownFields ++ default: ++ return nil ++ } ++ } ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ ++ (*AuthenticationMechanism_Token)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ ++ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), ++ (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ 
++ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), ++ (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ ++ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), ++ (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ ++ (*SessionReq_GetTlsConfigurationReq)(nil), ++ (*SessionReq_OffloadPrivateKeyOperationReq)(nil), ++ (*SessionReq_OffloadResumptionKeyOperationReq)(nil), ++ (*SessionReq_ValidatePeerCertificateChainReq)(nil), ++ } ++ file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ ++ (*SessionResp_GetTlsConfigurationResp)(nil), ++ (*SessionResp_OffloadPrivateKeyOperationResp)(nil), ++ (*SessionResp_OffloadResumptionKeyOperationResp)(nil), ++ (*SessionResp_ValidatePeerCertificateChainResp)(nil), ++ } ++ type x struct{} ++ out := protoimpl.TypeBuilder{ ++ File: protoimpl.DescBuilder{ ++ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), ++ RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, ++ NumEnums: 6, ++ NumMessages: 17, ++ NumExtensions: 0, ++ NumServices: 1, ++ }, ++ GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, ++ DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, ++ EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, ++ MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, ++ }.Build() ++ File_internal_proto_v2_s2a_s2a_proto = out.File ++ file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil ++ file_internal_proto_v2_s2a_s2a_proto_goTypes = nil ++ file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +new file mode 100644 +index 00000000000..2566df6c304 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +@@ -0,0 +1,159 @@ ++// Copyright 2022 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// https://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated by protoc-gen-go-grpc. DO NOT EDIT. ++// versions: ++// - protoc-gen-go-grpc v1.3.0 ++// - protoc v3.21.12 ++// source: internal/proto/v2/s2a/s2a.proto ++ ++package s2a_go_proto ++ ++import ( ++ context "context" ++ grpc "google.golang.org/grpc" ++ codes "google.golang.org/grpc/codes" ++ status "google.golang.org/grpc/status" ++) ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++// Requires gRPC-Go v1.32.0 or later. ++const _ = grpc.SupportPackageIsVersion7 ++ ++const ( ++ S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" ++) ++ ++// S2AServiceClient is the client API for S2AService service. 
++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. ++type S2AServiceClient interface { ++ // SetUpSession is a bidirectional stream used by applications to offload ++ // operations from the TLS handshake. ++ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) ++} ++ ++type s2AServiceClient struct { ++ cc grpc.ClientConnInterface ++} ++ ++func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { ++ return &s2AServiceClient{cc} ++} ++ ++func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { ++ stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &s2AServiceSetUpSessionClient{stream} ++ return x, nil ++} ++ ++type S2AService_SetUpSessionClient interface { ++ Send(*SessionReq) error ++ Recv() (*SessionResp, error) ++ grpc.ClientStream ++} ++ ++type s2AServiceSetUpSessionClient struct { ++ grpc.ClientStream ++} ++ ++func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { ++ return x.ClientStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { ++ m := new(SessionResp) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AServiceServer is the server API for S2AService service. ++// All implementations must embed UnimplementedS2AServiceServer ++// for forward compatibility ++type S2AServiceServer interface { ++ // SetUpSession is a bidirectional stream used by applications to offload ++ // operations from the TLS handshake. ++ SetUpSession(S2AService_SetUpSessionServer) error ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. ++type UnimplementedS2AServiceServer struct { ++} ++ ++func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { ++ return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") ++} ++func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} ++ ++// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. ++// Use of this interface is not recommended, as added methods to S2AServiceServer will ++// result in compilation errors. 
++type UnsafeS2AServiceServer interface { ++ mustEmbedUnimplementedS2AServiceServer() ++} ++ ++func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { ++ s.RegisterService(&S2AService_ServiceDesc, srv) ++} ++ ++func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { ++ return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) ++} ++ ++type S2AService_SetUpSessionServer interface { ++ Send(*SessionResp) error ++ Recv() (*SessionReq, error) ++ grpc.ServerStream ++} ++ ++type s2AServiceSetUpSessionServer struct { ++ grpc.ServerStream ++} ++ ++func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { ++ m := new(SessionReq) ++ if err := x.ServerStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. ++// It's only intended for direct use with grpc.RegisterService, ++// and not to be introspected or modified (even as a copy) ++var S2AService_ServiceDesc = grpc.ServiceDesc{ ++ ServiceName: "s2a.proto.v2.S2AService", ++ HandlerType: (*S2AServiceServer)(nil), ++ Methods: []grpc.MethodDesc{}, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "SetUpSession", ++ Handler: _S2AService_SetUpSession_Handler, ++ ServerStreams: true, ++ ClientStreams: true, ++ }, ++ }, ++ Metadata: "internal/proto/v2/s2a/s2a.proto", ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +new file mode 100644 +index 00000000000..486f4ec4f2a +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +@@ -0,0 +1,34 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package aeadcrypter provides the interface for AEAD cipher implementations ++// used by S2A's record protocol. ++package aeadcrypter ++ ++// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record ++// protocol. ++type S2AAEADCrypter interface { ++ // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. ++ // dst and plaintext may fully overlap or not at all. ++ Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) ++ // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may ++ // fully overlap or not at all. ++ Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) ++ // TagSize returns the tag size in bytes. 
++ TagSize() int ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +new file mode 100644 +index 00000000000..85c4e595d75 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +@@ -0,0 +1,70 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/aes" ++ "crypto/cipher" ++ "fmt" ++) ++ ++// Supported key sizes in bytes. ++const ( ++ AES128GCMKeySize = 16 ++ AES256GCMKeySize = 32 ++) ++ ++// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. ++type aesgcm struct { ++ aead cipher.AEAD ++} ++ ++// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be ++// either 128 bits or 256 bits. ++func NewAESGCM(key []byte) (S2AAEADCrypter, error) { ++ if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { ++ return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) ++ } ++ c, err := aes.NewCipher(key) ++ if err != nil { ++ return nil, err ++ } ++ a, err := cipher.NewGCM(c) ++ if err != nil { ++ return nil, err ++ } ++ return &aesgcm{aead: a}, nil ++} ++ ++// Encrypt is the encryption function. dst can contain bytes at the beginning of ++// the ciphertext that will not be encrypted but will be authenticated. If dst ++// has enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ return encrypt(s.aead, dst, plaintext, nonce, aad) ++} ++ ++func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ return decrypt(s.aead, dst, ciphertext, nonce, aad) ++} ++ ++func (s *aesgcm) TagSize() int { ++ return TagSize ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +new file mode 100644 +index 00000000000..214df4ca415 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +@@ -0,0 +1,67 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/cipher" ++ "fmt" ++ ++ "golang.org/x/crypto/chacha20poly1305" ++) ++ ++// Supported key size in bytes. ++const ( ++ Chacha20Poly1305KeySize = 32 ++) ++ ++// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD ++// crypter. ++type chachapoly struct { ++ aead cipher.AEAD ++} ++ ++// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must ++// be Chacha20Poly1305KeySize bytes in length. ++func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { ++ if len(key) != Chacha20Poly1305KeySize { ++ return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) ++ } ++ c, err := chacha20poly1305.New(key) ++ if err != nil { ++ return nil, err ++ } ++ return &chachapoly{aead: c}, nil ++} ++ ++// Encrypt is the encryption function. dst can contain bytes at the beginning of ++// the ciphertext that will not be encrypted but will be authenticated. If dst ++// has enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ return encrypt(s.aead, dst, plaintext, nonce, aad) ++} ++ ++func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ return decrypt(s.aead, dst, ciphertext, nonce, aad) ++} ++ ++func (s *chachapoly) TagSize() int { ++ return TagSize ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +new file mode 100644 +index 00000000000..b3c36ad95dc +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +@@ -0,0 +1,92 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package aeadcrypter ++ ++import ( ++ "crypto/cipher" ++ "fmt" ++) ++ ++const ( ++ // TagSize is the tag size in bytes for AES-128-GCM-SHA256, ++ // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. ++ TagSize = 16 ++ // NonceSize is the size of the nonce in number of bytes for ++ // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. ++ NonceSize = 12 ++ // SHA256DigestSize is the digest size of sha256 in bytes. ++ SHA256DigestSize = 32 ++ // SHA384DigestSize is the digest size of sha384 in bytes. ++ SHA384DigestSize = 48 ++) ++ ++// sliceForAppend takes a slice and a requested number of bytes. It returns a ++// slice with the contents of the given slice followed by that many bytes and a ++// second slice that aliases into it and contains only the extra bytes. If the ++// original slice has sufficient capacity then no allocation is performed. 
++func sliceForAppend(in []byte, n int) (head, tail []byte) { ++ if total := len(in) + n; cap(in) >= total { ++ head = in[:total] ++ } else { ++ head = make([]byte, total) ++ copy(head, in) ++ } ++ tail = head[len(in):] ++ return head, tail ++} ++ ++// encrypt is the encryption function for an AEAD crypter. aead determines ++// the type of AEAD crypter. dst can contain bytes at the beginning of the ++// ciphertext that will not be encrypted but will be authenticated. If dst has ++// enough capacity to hold these bytes, the ciphertext and the tag, no ++// allocation and copy operations will be performed. dst and plaintext may ++// fully overlap or not at all. ++func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) ++ } ++ // If we need to allocate an output buffer, we want to include space for ++ // the tag to avoid forcing the caller to reallocate as well. ++ dlen := len(dst) ++ dst, out := sliceForAppend(dst, len(plaintext)+TagSize) ++ data := out[:len(plaintext)] ++ copy(data, plaintext) // data may fully overlap plaintext ++ ++ // Seal appends the ciphertext and the tag to its first argument and ++ // returns the updated slice. However, sliceForAppend above ensures that ++ // dst has enough capacity to avoid a reallocation and copy due to the ++ // append. ++ dst = aead.Seal(dst[:dlen], nonce, data, aad) ++ return dst, nil ++} ++ ++// decrypt is the decryption function for an AEAD crypter, where aead determines ++// the type of AEAD crypter, and dst the destination bytes for the decrypted ++// ciphertext. The dst buffer may fully overlap with plaintext or not at all. ++func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) ++ } ++ // If dst is equal to ciphertext[:0], ciphertext storage is reused. ++ plaintext, err := aead.Open(dst, nonce, ciphertext, aad) ++ if err != nil { ++ return nil, fmt.Errorf("message auth failed: %v", err) ++ } ++ return plaintext, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +new file mode 100644 +index 00000000000..ddeaa6d77d7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +@@ -0,0 +1,98 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import ( ++ "crypto/sha256" ++ "crypto/sha512" ++ "fmt" ++ "hash" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/aeadcrypter" ++) ++ ++// ciphersuite is the interface for retrieving ciphersuite-specific information ++// and utilities. 
++type ciphersuite interface { ++ // keySize returns the key size in bytes. This refers to the key used by ++ // the AEAD crypter. This is derived by calling HKDF expand on the traffic ++ // secret. ++ keySize() int ++ // nonceSize returns the nonce size in bytes. ++ nonceSize() int ++ // trafficSecretSize returns the traffic secret size in bytes. This refers ++ // to the secret used to derive the traffic key and nonce, as specified in ++ // https://tools.ietf.org/html/rfc8446#section-7. ++ trafficSecretSize() int ++ // hashFunction returns the hash function for the ciphersuite. ++ hashFunction() func() hash.Hash ++ // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite ++ // using that key. ++ aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) ++} ++ ++func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { ++ switch ciphersuite { ++ case s2apb.Ciphersuite_AES_128_GCM_SHA256: ++ return &aesgcm128sha256{}, nil ++ case s2apb.Ciphersuite_AES_256_GCM_SHA384: ++ return &aesgcm256sha384{}, nil ++ case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: ++ return &chachapolysha256{}, nil ++ default: ++ return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) ++ } ++} ++ ++// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite ++// interface. ++type aesgcm128sha256 struct{} ++ ++func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } ++func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } ++func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } ++func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } ++func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewAESGCM(key) ++} ++ ++// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite ++// interface. ++type aesgcm256sha384 struct{} ++ ++func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } ++func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } ++func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } ++func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } ++func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewAESGCM(key) ++} ++ ++// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite ++// interface. ++type chachapolysha256 struct{} ++ ++func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } ++func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } ++func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } ++func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } ++func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { ++ return aeadcrypter.NewChachaPoly(key) ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +new file mode 100644 +index 00000000000..9499cdca759 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +@@ -0,0 +1,60 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import "errors" ++ ++// counter is a 64-bit counter. ++type counter struct { ++ val uint64 ++ hasOverflowed bool ++} ++ ++// newCounter creates a new counter with the initial value set to val. ++func newCounter(val uint64) counter { ++ return counter{val: val} ++} ++ ++// value returns the current value of the counter. ++func (c *counter) value() (uint64, error) { ++ if c.hasOverflowed { ++ return 0, errors.New("counter has overflowed") ++ } ++ return c.val, nil ++} ++ ++// increment increments the counter and checks for overflow. ++func (c *counter) increment() { ++ // If the counter is already invalid due to overflow, there is no need to ++ // increase it. We check for the hasOverflowed flag in the call to value(). ++ if c.hasOverflowed { ++ return ++ } ++ c.val++ ++ if c.val == 0 { ++ c.hasOverflowed = true ++ } ++} ++ ++// reset sets the counter value to zero and sets the hasOverflowed flag to ++// false. ++func (c *counter) reset() { ++ c.val = 0 ++ c.hasOverflowed = false ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +new file mode 100644 +index 00000000000..e05f2c36a6d +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +@@ -0,0 +1,59 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package halfconn ++ ++import ( ++ "fmt" ++ "hash" ++ ++ "golang.org/x/crypto/hkdf" ++) ++ ++// hkdfExpander is the interface for the HKDF expansion function; see ++// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is ++// specified in https://tools.ietf.org/html/rfc8446#section-7.2 ++type hkdfExpander interface { ++ // expand takes a secret, a label, and the output length in bytes, and ++ // returns the resulting expanded key. ++ expand(secret, label []byte, length int) ([]byte, error) ++} ++ ++// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf ++// for HKDF expansion. ++type defaultHKDFExpander struct { ++ h func() hash.Hash ++} ++ ++// newDefaultHKDFExpander creates an instance of the default HKDF expander ++// using the given hash function. 
++func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { ++ return &defaultHKDFExpander{h: h} ++} ++ ++func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { ++ outBuf := make([]byte, length) ++ n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) ++ if err != nil { ++ return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) ++ } ++ if n < length { ++ return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) ++ } ++ return outBuf, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +new file mode 100644 +index 00000000000..dff99ff5940 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +@@ -0,0 +1,193 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 ++// connection. ++package halfconn ++ ++import ( ++ "fmt" ++ "sync" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/aeadcrypter" ++ "golang.org/x/crypto/cryptobyte" ++) ++ ++// The constants below were taken from Section 7.2 and 7.3 in ++// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label ++// in HKDF-Expand-Label. ++const ( ++ tls13Key = "tls13 key" ++ tls13Nonce = "tls13 iv" ++ tls13Update = "tls13 traffic upd" ++) ++ ++// S2AHalfConnection stores the state of the TLS 1.3 connection in the ++// inbound or outbound direction. ++type S2AHalfConnection struct { ++ cs ciphersuite ++ expander hkdfExpander ++ // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. ++ mutex sync.Mutex ++ aeadCrypter aeadcrypter.S2AAEADCrypter ++ sequence counter ++ trafficSecret []byte ++ nonce []byte ++} ++ ++// New creates a new instance of S2AHalfConnection given a ciphersuite and a ++// traffic secret. ++func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { ++ cs, err := newCiphersuite(ciphersuite) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) ++ } ++ if cs.trafficSecretSize() != len(trafficSecret) { ++ return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) ++ } ++ ++ hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} ++ if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { ++ return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) ++ } ++ ++ return hc, nil ++} ++ ++// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. ++// dst and plaintext may fully overlap or not at all. 
Note that the sequence ++// number will still be incremented on failure, unless the sequence has ++// overflowed. ++func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { ++ hc.mutex.Lock() ++ sequence, err := hc.getAndIncrementSequence() ++ if err != nil { ++ hc.mutex.Unlock() ++ return nil, err ++ } ++ nonce := hc.maskedNonce(sequence) ++ crypter := hc.aeadCrypter ++ hc.mutex.Unlock() ++ return crypter.Encrypt(dst, plaintext, nonce, aad) ++} ++ ++// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may ++// fully overlap or not at all. Note that the sequence number will still be ++// incremented on failure, unless the sequence has overflowed. ++func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { ++ hc.mutex.Lock() ++ sequence, err := hc.getAndIncrementSequence() ++ if err != nil { ++ hc.mutex.Unlock() ++ return nil, err ++ } ++ nonce := hc.maskedNonce(sequence) ++ crypter := hc.aeadCrypter ++ hc.mutex.Unlock() ++ return crypter.Decrypt(dst, ciphertext, nonce, aad) ++} ++ ++// UpdateKey advances the traffic secret key, as specified in ++// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives ++// a new key and nonce, and resets the sequence number. ++func (hc *S2AHalfConnection) UpdateKey() error { ++ hc.mutex.Lock() ++ defer hc.mutex.Unlock() ++ ++ var err error ++ hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) ++ if err != nil { ++ return fmt.Errorf("failed to derive traffic secret: %v", err) ++ } ++ ++ if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { ++ return fmt.Errorf("failed to update half connection: %v", err) ++ } ++ ++ hc.sequence.reset() ++ return nil ++} ++ ++// TagSize returns the tag size in bytes of the underlying AEAD crypter. ++func (hc *S2AHalfConnection) TagSize() int { ++ return hc.aeadCrypter.TagSize() ++} ++ ++// updateCrypterAndNonce takes a new traffic secret and updates the crypter ++// and nonce. Note that the mutex must be held while calling this function. ++func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { ++ key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) ++ if err != nil { ++ return fmt.Errorf("failed to update key: %v", err) ++ } ++ ++ hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) ++ if err != nil { ++ return fmt.Errorf("failed to update nonce: %v", err) ++ } ++ ++ hc.aeadCrypter, err = hc.cs.aeadCrypter(key) ++ if err != nil { ++ return fmt.Errorf("failed to update AEAD crypter: %v", err) ++ } ++ return nil ++} ++ ++// getAndIncrement returns the current sequence number and increments it. Note ++// that the mutex must be held while calling this function. ++func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { ++ sequence, err := hc.sequence.value() ++ if err != nil { ++ return 0, err ++ } ++ hc.sequence.increment() ++ return sequence, nil ++} ++ ++// maskedNonce creates a copy of the nonce that is masked with the sequence ++// number. Note that the mutex must be held while calling this function. 
++func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { ++ const uint64Size = 8 ++ nonce := make([]byte, len(hc.nonce)) ++ copy(nonce, hc.nonce) ++ for i := 0; i < uint64Size; i++ { ++ nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) ++ } ++ return nonce ++} ++ ++// deriveSecret implements the Derive-Secret function, as specified in ++// https://tools.ietf.org/html/rfc8446#section-7.1. ++func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { ++ var hkdfLabel cryptobyte.Builder ++ hkdfLabel.AddUint16(uint16(length)) ++ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { ++ b.AddBytes(label) ++ }) ++ // Append an empty `Context` field to the label, as specified in the RFC. ++ // The half connection does not use the `Context` field. ++ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { ++ b.AddBytes([]byte("")) ++ }) ++ hkdfLabelBytes, err := hkdfLabel.Bytes() ++ if err != nil { ++ return nil, fmt.Errorf("deriveSecret failed: %v", err) ++ } ++ return hc.expander.expand(secret, hkdfLabelBytes, length) ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go +new file mode 100644 +index 00000000000..c60515510a7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/record.go +@@ -0,0 +1,757 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package record implements the TLS 1.3 record protocol used by the S2A ++// transport credentials. ++package record ++ ++import ( ++ "encoding/binary" ++ "errors" ++ "fmt" ++ "math" ++ "net" ++ "sync" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "github.com/google/s2a-go/internal/record/internal/halfconn" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// recordType is the `ContentType` as described in ++// https://tools.ietf.org/html/rfc8446#section-5.1. ++type recordType byte ++ ++const ( ++ alert recordType = 21 ++ handshake recordType = 22 ++ applicationData recordType = 23 ++) ++ ++// keyUpdateRequest is the `KeyUpdateRequest` as described in ++// https://tools.ietf.org/html/rfc8446#section-4.6.3. ++type keyUpdateRequest byte ++ ++const ( ++ updateNotRequested keyUpdateRequest = 0 ++ updateRequested keyUpdateRequest = 1 ++) ++ ++// alertDescription is the `AlertDescription` as described in ++// https://tools.ietf.org/html/rfc8446#section-6. ++type alertDescription byte ++ ++const ( ++ closeNotify alertDescription = 0 ++) ++ ++// sessionTicketState is used to determine whether session tickets have not yet ++// been received, are in the process of being received, or have finished ++// receiving. 
++type sessionTicketState byte ++ ++const ( ++ ticketsNotYetReceived sessionTicketState = 0 ++ receivingTickets sessionTicketState = 1 ++ notReceivingTickets sessionTicketState = 2 ++) ++ ++const ( ++ // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, ++ // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from ++ // https://tools.ietf.org/html/rfc8446#section-5.1. ++ ++ // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext ++ // in a single TLS 1.3 record. ++ tlsRecordMaxPlaintextSize = 16384 // 2^14 ++ // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. ++ tlsRecordTypeSize = 1 ++ // tlsTagSize is the size in bytes of the tag of the following three ++ // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, ++ // CHACHA20-POLY1305-SHA256. ++ tlsTagSize = 16 ++ // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a ++ // single TLS 1.3 record. This is the maximum size of the plaintext plus the ++ // record type byte and 16 bytes of the tag. ++ tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize ++ // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record ++ // header type. ++ tlsRecordHeaderTypeSize = 1 ++ // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS ++ // 1.3 record header legacy record version. ++ tlsRecordHeaderLegacyRecordVersionSize = 2 ++ // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 ++ // record header payload length. ++ tlsRecordHeaderPayloadLengthSize = 2 ++ // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. ++ tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize ++ // tlsRecordMaxSize ++ tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize ++ // tlsApplicationData is the application data type of the TLS 1.3 record ++ // header. ++ tlsApplicationData = 23 ++ // tlsLegacyRecordVersion is the legacy record version of the TLS record. ++ tlsLegacyRecordVersion = 3 ++ // tlsAlertSize is the size in bytes of an alert of TLS 1.3. ++ tlsAlertSize = 2 ++) ++ ++const ( ++ // These are TLS 1.3 handshake-specific constants. ++ ++ // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session ++ // ticket message of TLS 1.3. ++ tlsHandshakeNewSessionTicketType = 4 ++ // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message ++ // of TLS 1.3. ++ tlsHandshakeKeyUpdateType = 24 ++ // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake ++ // message type field. ++ tlsHandshakeMsgTypeSize = 1 ++ // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake ++ // message length field. ++ tlsHandshakeLengthSize = 3 ++ // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3 ++ // handshake key update message. ++ tlsHandshakeKeyUpdateMsgSize = 1 ++ // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3 ++ // handshake message. ++ tlsHandshakePrefixSize = 4 ++ // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message ++ // in TLS 1.3. This is the sum of the max sizes of all the fields in the ++ // NewSessionTicket struct specified in ++ // https://tools.ietf.org/html/rfc8446#section-4.6.1. ++ tlsMaxSessionTicketSize = 131338 ++) ++ ++const ( ++ // outBufMaxRecords is the maximum number of records that can fit in the ++ // ourRecordsBuf buffer. 
++ outBufMaxRecords = 16 ++ // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer. ++ outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize ++ // maxAllowedTickets is the maximum number of session tickets that are ++ // allowed. The number of tickets are limited to ensure that the size of the ++ // ticket queue does not grow indefinitely. S2A also keeps a limit on the ++ // number of tickets that it caches. ++ maxAllowedTickets = 5 ++) ++ ++// preConstructedKeyUpdateMsg holds the key update message. This is needed as an ++// optimization so that the same message does not need to be constructed every ++// time a key update message is sent. ++var preConstructedKeyUpdateMsg = buildKeyUpdateRequest() ++ ++// conn represents a secured TLS connection. It implements the net.Conn ++// interface. ++type conn struct { ++ net.Conn ++ // inConn is the half connection responsible for decrypting incoming bytes. ++ inConn *halfconn.S2AHalfConnection ++ // outConn is the half connection responsible for encrypting outgoing bytes. ++ outConn *halfconn.S2AHalfConnection ++ // pendingApplicationData holds data that has been read from the connection ++ // and decrypted, but has not yet been returned by Read. ++ pendingApplicationData []byte ++ // unusedBuf holds data read from the network that has not yet been ++ // decrypted. This data might not consist of a complete record. It may ++ // consist of several records, the last of which could be incomplete. ++ unusedBuf []byte ++ // outRecordsBuf is a buffer used to store outgoing TLS records before ++ // they are written to the network. ++ outRecordsBuf []byte ++ // nextRecord stores the next record info in the unusedBuf buffer. ++ nextRecord []byte ++ // overheadSize is the overhead size in bytes of each TLS 1.3 record, which ++ // is computed as overheadSize = header size + record type byte + tag size. ++ // Note that there is no padding by zeros in the overhead calculation. ++ overheadSize int ++ // readMutex guards against concurrent calls to Read. This is required since ++ // Close may be called during a Read. ++ readMutex sync.Mutex ++ // writeMutex guards against concurrent calls to Write. This is required ++ // since Close may be called during a Write, and also because a key update ++ // message may be written during a Read. ++ writeMutex sync.Mutex ++ // handshakeBuf holds handshake messages while they are being processed. ++ handshakeBuf []byte ++ // ticketState is the current processing state of the session tickets. ++ ticketState sessionTicketState ++ // sessionTickets holds the completed session tickets until they are sent to ++ // the handshaker service for processing. ++ sessionTickets [][]byte ++ // ticketSender sends session tickets to the S2A handshaker service. ++ ticketSender s2aTicketSender ++ // callComplete is a channel that blocks closing the record protocol until a ++ // pending call to the S2A completes. ++ callComplete chan bool ++} ++ ++// ConnParameters holds the parameters used for creating a new conn object. ++type ConnParameters struct { ++ // NetConn is the TCP connection to the peer. This parameter is required. ++ NetConn net.Conn ++ // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker ++ // service. This parameter is required. ++ Ciphersuite commonpb.Ciphersuite ++ // TLSVersion is the TLS version number negotiated by the S2A handshaker ++ // service. This parameter is required. 
++ TLSVersion commonpb.TLSVersion ++ // InTrafficSecret is the traffic secret used to derive the session key for ++ // the inbound direction. This parameter is required. ++ InTrafficSecret []byte ++ // OutTrafficSecret is the traffic secret used to derive the session key ++ // for the outbound direction. This parameter is required. ++ OutTrafficSecret []byte ++ // UnusedBuf is the data read from the network that has not yet been ++ // decrypted. This parameter is optional. If not provided, then no ++ // application data was sent in the same flight of messages as the final ++ // handshake message. ++ UnusedBuf []byte ++ // InSequence is the sequence number of the next, incoming, TLS record. ++ // This parameter is required. ++ InSequence uint64 ++ // OutSequence is the sequence number of the next, outgoing, TLS record. ++ // This parameter is required. ++ OutSequence uint64 ++ // HSAddr stores the address of the S2A handshaker service. This parameter ++ // is optional. If not provided, then TLS resumption is disabled. ++ HSAddr string ++ // ConnectionId is the connection identifier that was created and sent by ++ // S2A at the end of a handshake. ++ ConnectionID uint64 ++ // LocalIdentity is the local identity that was used by S2A during session ++ // setup and included in the session result. ++ LocalIdentity *commonpb.Identity ++ // EnsureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ EnsureProcessSessionTickets *sync.WaitGroup ++} ++ ++// NewConn creates a TLS record protocol that wraps the TCP connection. ++func NewConn(o *ConnParameters) (net.Conn, error) { ++ if o == nil { ++ return nil, errors.New("conn options must not be nil") ++ } ++ if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { ++ return nil, errors.New("TLS version must be TLS 1.3") ++ } ++ ++ inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create inbound half connection: %v", err) ++ } ++ outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create outbound half connection: %v", err) ++ } ++ ++ // The tag size for the in/out connections should be the same. ++ overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() ++ var unusedBuf []byte ++ if o.UnusedBuf == nil { ++ // We pre-allocate unusedBuf to be of size ++ // 2*tlsRecordMaxSize-1 during initialization. We only read from the ++ // network into unusedBuf when unusedBuf does not contain a complete ++ // record and the incomplete record is at most tlsRecordMaxSize-1 ++ // (bytes). And we read at most tlsRecordMaxSize bytes of data from the ++ // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 ++ // is large enough to buffer data read from the network. 
++ unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) ++ } else { ++ unusedBuf = make([]byte, len(o.UnusedBuf)) ++ copy(unusedBuf, o.UnusedBuf) ++ } ++ ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ grpclog.Infof("failed to create single token access token manager: %v", err) ++ } ++ ++ s2aConn := &conn{ ++ Conn: o.NetConn, ++ inConn: inConn, ++ outConn: outConn, ++ unusedBuf: unusedBuf, ++ outRecordsBuf: make([]byte, tlsRecordMaxSize), ++ nextRecord: unusedBuf, ++ overheadSize: overheadSize, ++ ticketState: ticketsNotYetReceived, ++ // Pre-allocate the buffer for one session ticket message and the max ++ // plaintext size. This is the largest size that handshakeBuf will need ++ // to hold. The largest incomplete handshake message is the ++ // [handshake header size] + [max session ticket size] - 1. ++ // Then, tlsRecordMaxPlaintextSize is the maximum size that will be ++ // appended to the handshakeBuf before the handshake message is ++ // completed. Therefore, the buffer size below should be large enough to ++ // buffer any handshake messages. ++ handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), ++ ticketSender: &ticketSender{ ++ hsAddr: o.HSAddr, ++ connectionID: o.ConnectionID, ++ localIdentity: o.LocalIdentity, ++ tokenManager: tokenManager, ++ ensureProcessSessionTickets: o.EnsureProcessSessionTickets, ++ }, ++ callComplete: make(chan bool), ++ } ++ return s2aConn, nil ++} ++ ++// Read reads and decrypts a TLS 1.3 record from the underlying connection, and ++// copies any application data received from the peer into b. If the size of the ++// payload is greater than len(b), Read retains the remaining bytes in an ++// internal buffer, and subsequent calls to Read will read from this buffer ++// until it is exhausted. At most 1 TLS record worth of application data is ++// written to b for each call to Read. ++// ++// Note that for the user to efficiently call this method, the user should ++// ensure that the buffer b is allocated such that the buffer does not have any ++// unused segments. This can be done by calling Read via io.ReadFull, which ++// continually calls Read until the specified buffer has been filled. Also note ++// that the user should close the connection via Close() if an error is thrown ++// by a call to Read. ++func (p *conn) Read(b []byte) (n int, err error) { ++ p.readMutex.Lock() ++ defer p.readMutex.Unlock() ++ // Check if p.pendingApplication data has leftover application data from ++ // the previous call to Read. ++ if len(p.pendingApplicationData) == 0 { ++ // Read a full record from the wire. ++ record, err := p.readFullRecord() ++ if err != nil { ++ return 0, err ++ } ++ // Now we have a complete record, so split the header and validate it ++ // The TLS record is split into 2 pieces: the record header and the ++ // payload. The payload has the following form: ++ // [payload] = [ciphertext of application data] ++ // + [ciphertext of record type byte] ++ // + [(optionally) ciphertext of padding by zeros] ++ // + [tag] ++ header, payload, err := splitAndValidateHeader(record) ++ if err != nil { ++ return 0, err ++ } ++ // Decrypt the ciphertext. ++ p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) ++ if err != nil { ++ return 0, err ++ } ++ // Remove the padding by zeros and the record type byte from the ++ // p.pendingApplicationData buffer. 
++ msgType, err := p.stripPaddingAndType() ++ if err != nil { ++ return 0, err ++ } ++ // Check that the length of the plaintext after stripping the padding ++ // and record type byte is under the maximum plaintext size. ++ if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { ++ return 0, errors.New("plaintext size larger than maximum") ++ } ++ // The expected message types are application data, alert, and ++ // handshake. For application data, the bytes are directly copied into ++ // b. For an alert, the type of the alert is checked and the connection ++ // is closed on a close notify alert. For a handshake message, the ++ // handshake message type is checked. The handshake message type can be ++ // a key update type, for which we advance the traffic secret, and a ++ // new session ticket type, for which we send the received ticket to S2A ++ // for processing. ++ switch msgType { ++ case applicationData: ++ if len(p.handshakeBuf) > 0 { ++ return 0, errors.New("application data received while processing fragmented handshake messages") ++ } ++ if p.ticketState == receivingTickets { ++ p.ticketState = notReceivingTickets ++ grpclog.Infof("Sending session tickets to S2A.") ++ p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) ++ } ++ case alert: ++ return 0, p.handleAlertMessage() ++ case handshake: ++ if err = p.handleHandshakeMessage(); err != nil { ++ return 0, err ++ } ++ return 0, nil ++ default: ++ return 0, errors.New("unknown record type") ++ } ++ } ++ // Write as much application data as possible to b, the output buffer. ++ n = copy(b, p.pendingApplicationData) ++ p.pendingApplicationData = p.pendingApplicationData[n:] ++ return n, nil ++} ++ ++// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a ++// TLS 1.3 record (of type "application data") from each segment, and sends ++// the record to the peer. It returns the number of plaintext bytes that were ++// successfully sent to the peer. ++func (p *conn) Write(b []byte) (n int, err error) { ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ return p.writeTLSRecord(b, tlsApplicationData) ++} ++ ++// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, ++// builds a TLS 1.3 record (of type recordType) from each segment, and sends ++// the record to the peer. It returns the number of plaintext bytes that were ++// successfully sent to the peer. ++func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { ++ // Create a record of only header, record type, and tag if given empty ++ // byte array. ++ if len(b) == 0 { ++ recordEndIndex, _, err := p.buildRecord(b, recordType, 0) ++ if err != nil { ++ return 0, err ++ } ++ ++ // Write the bytes stored in outRecordsBuf to p.Conn. Since we return ++ // the number of plaintext bytes written without overhead, we will ++ // always return 0 while p.Conn.Write returns the entire record length. 
++ _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) ++ return 0, err ++ } ++ ++ numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) ++ totalRecordsSize := len(b) + numRecords*p.overheadSize ++ partialBSize := len(b) ++ if totalRecordsSize > outBufMaxSize { ++ totalRecordsSize = outBufMaxSize ++ partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize ++ } ++ if len(p.outRecordsBuf) < totalRecordsSize { ++ p.outRecordsBuf = make([]byte, totalRecordsSize) ++ } ++ for bStart := 0; bStart < len(b); bStart += partialBSize { ++ bEnd := bStart + partialBSize ++ if bEnd > len(b) { ++ bEnd = len(b) ++ } ++ partialB := b[bStart:bEnd] ++ recordEndIndex := 0 ++ for len(partialB) > 0 { ++ recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) ++ if err != nil { ++ // Return the amount of bytes written prior to the error. ++ return bStart, err ++ } ++ } ++ // Write the bytes stored in outRecordsBuf to p.Conn. If there is an ++ // error, calculate the total number of plaintext bytes of complete ++ // records successfully written to the peer and return it. ++ nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) ++ if err != nil { ++ numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) ++ return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err ++ } ++ } ++ return len(b), nil ++} ++ ++// buildRecord builds a TLS 1.3 record of type recordType from plaintext, ++// and writes the record to outRecordsBuf at recordStartIndex. The record will ++// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the ++// index of outRecordsBuf where the current record ends, as well as any ++// remaining plaintext bytes. ++func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { ++ // Construct the payload, which consists of application data and record type. ++ dataLen := len(plaintext) ++ if dataLen > tlsRecordMaxPlaintextSize { ++ dataLen = tlsRecordMaxPlaintextSize ++ } ++ remainingPlaintext = plaintext[dataLen:] ++ newRecordBuf := p.outRecordsBuf[recordStartIndex:] ++ ++ copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) ++ newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType ++ payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. ++ // Construct the header. ++ newRecordBuf[0] = tlsApplicationData ++ newRecordBuf[1] = tlsLegacyRecordVersion ++ newRecordBuf[2] = tlsLegacyRecordVersion ++ binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) ++ header := newRecordBuf[:tlsRecordHeaderSize] ++ ++ // Encrypt the payload using header as aad. ++ encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) ++ if err != nil { ++ return 0, plaintext, err ++ } ++ recordStartIndex += len(header) + len(encryptedPayload) ++ return recordStartIndex, remainingPlaintext, nil ++} ++ ++func (p *conn) Close() error { ++ p.readMutex.Lock() ++ defer p.readMutex.Unlock() ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ // If p.ticketState is equal to notReceivingTickets, then S2A has ++ // been sent a flight of session tickets, and we must wait for the ++ // call to S2A to complete before closing the record protocol. 
++ if p.ticketState == notReceivingTickets { ++ <-p.callComplete ++ grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") ++ } ++ return p.Conn.Close() ++} ++ ++// stripPaddingAndType strips the padding by zeros and record type from ++// p.pendingApplicationData and returns the record type. Note that ++// p.pendingApplicationData should be of the form: ++// [application data] + [record type byte] + [trailing zeros] ++func (p *conn) stripPaddingAndType() (recordType, error) { ++ if len(p.pendingApplicationData) == 0 { ++ return 0, errors.New("application data had length 0") ++ } ++ i := len(p.pendingApplicationData) - 1 ++ // Search for the index of the record type byte. ++ for i > 0 { ++ if p.pendingApplicationData[i] != 0 { ++ break ++ } ++ i-- ++ } ++ rt := recordType(p.pendingApplicationData[i]) ++ p.pendingApplicationData = p.pendingApplicationData[:i] ++ return rt, nil ++} ++ ++// readFullRecord reads from the wire until a record is completed and returns ++// the full record. ++func (p *conn) readFullRecord() (fullRecord []byte, err error) { ++ fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) ++ if err != nil { ++ return nil, err ++ } ++ // Check whether the next record to be decrypted has been completely ++ // received. ++ if len(fullRecord) == 0 { ++ copy(p.unusedBuf, p.nextRecord) ++ p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] ++ // Always copy next incomplete record to the beginning of the ++ // unusedBuf buffer and reset nextRecord to it. ++ p.nextRecord = p.unusedBuf ++ } ++ // Keep reading from the wire until we have a complete record. ++ for len(fullRecord) == 0 { ++ if len(p.unusedBuf) == cap(p.unusedBuf) { ++ tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) ++ copy(tmp, p.unusedBuf) ++ p.unusedBuf = tmp ++ } ++ n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) ++ if err != nil { ++ return nil, err ++ } ++ p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] ++ fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) ++ if err != nil { ++ return nil, err ++ } ++ } ++ return fullRecord, nil ++} ++ ++// parseReadBuffer parses the provided buffer and returns a full record and any ++// remaining bytes in that buffer. If the record is incomplete, nil is returned ++// for the first return value and the given byte buffer is returned for the ++// second return value. The length of the payload specified by the header should ++// not be greater than maxLen, otherwise an error is returned. Note that this ++// function does not allocate or copy any buffers. ++func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { ++ // If the header is not complete, return the provided buffer as remaining ++ // buffer. ++ if len(b) < tlsRecordHeaderSize { ++ return nil, b, nil ++ } ++ msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize] ++ length := binary.BigEndian.Uint16(msgLenField) ++ if length > maxLen { ++ return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen) ++ } ++ if len(b) < int(length)+tlsRecordHeaderSize { ++ // Record is not complete yet. ++ return nil, b, nil ++ } ++ return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil ++} ++ ++// splitAndValidateHeader splits the header from the payload in the TLS 1.3 ++// record and returns them. 
Note that the header is checked for validity, and an ++// error is returned when an invalid header is parsed. Also note that this ++// function does not allocate or copy any buffers. ++func splitAndValidateHeader(record []byte) (header, payload []byte, err error) { ++ if len(record) < tlsRecordHeaderSize { ++ return nil, nil, fmt.Errorf("record was smaller than the header size") ++ } ++ header = record[:tlsRecordHeaderSize] ++ payload = record[tlsRecordHeaderSize:] ++ if header[0] != tlsApplicationData { ++ return nil, nil, fmt.Errorf("incorrect type in the header") ++ } ++ // Check the legacy record version, which should be 0x03, 0x03. ++ if header[1] != 0x03 || header[2] != 0x03 { ++ return nil, nil, fmt.Errorf("incorrect legacy record version in the header") ++ } ++ return header, payload, nil ++} ++ ++// handleAlertMessage handles an alert message. ++func (p *conn) handleAlertMessage() error { ++ if len(p.pendingApplicationData) != tlsAlertSize { ++ return errors.New("invalid alert message size") ++ } ++ alertType := p.pendingApplicationData[1] ++ // Clear the body of the alert message. ++ p.pendingApplicationData = p.pendingApplicationData[:0] ++ if alertType == byte(closeNotify) { ++ return errors.New("received a close notify alert") ++ } ++ // TODO(matthewstevenson88): Add support for more alert types. ++ return fmt.Errorf("received an unrecognized alert type: %v", alertType) ++} ++ ++// parseHandshakeHeader parses a handshake message from the handshake buffer. ++// It returns the message type, the message length, the message, the raw message ++// that includes the type and length bytes and a flag indicating whether the ++// handshake message has been fully parsed. i.e. whether the entire handshake ++// message was in the handshake buffer. ++func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) { ++ // Handle the case where the 4 byte handshake header is fragmented. ++ if len(p.handshakeBuf) < tlsHandshakePrefixSize { ++ return 0, 0, nil, nil, false ++ } ++ msgType = p.handshakeBuf[0] ++ msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize]) ++ if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) { ++ return 0, 0, nil, nil, false ++ } ++ msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen] ++ rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen] ++ p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:] ++ return msgType, msgLen, msg, rawMsg, true ++} ++ ++// handleHandshakeMessage handles a handshake message. Note that the first ++// complete handshake message from the handshake buffer is removed, if it ++// exists. ++func (p *conn) handleHandshakeMessage() error { ++ // Copy the pending application data to the handshake buffer. At this point, ++ // we are guaranteed that the pending application data contains only parts ++ // of a handshake message. ++ p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...) ++ p.pendingApplicationData = p.pendingApplicationData[:0] ++ // Several handshake messages may be coalesced into a single record. ++ // Continue reading them until the handshake buffer is empty. ++ for len(p.handshakeBuf) > 0 { ++ handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() ++ if !ok { ++ // The handshake could not be fully parsed, so read in another ++ // record and try again later. 
++ break ++ } ++ switch handshakeMsgType { ++ case tlsHandshakeKeyUpdateType: ++ if msgLen != tlsHandshakeKeyUpdateMsgSize { ++ return errors.New("invalid handshake key update message length") ++ } ++ if len(p.handshakeBuf) != 0 { ++ return errors.New("key update message must be the last message of a handshake record") ++ } ++ if err := p.handleKeyUpdateMsg(msg); err != nil { ++ return err ++ } ++ case tlsHandshakeNewSessionTicketType: ++ // Ignore tickets that are received after a batch of tickets has ++ // been sent to S2A. ++ if p.ticketState == notReceivingTickets { ++ continue ++ } ++ if p.ticketState == ticketsNotYetReceived { ++ p.ticketState = receivingTickets ++ } ++ p.sessionTickets = append(p.sessionTickets, rawMsg) ++ if len(p.sessionTickets) == maxAllowedTickets { ++ p.ticketState = notReceivingTickets ++ grpclog.Infof("Sending session tickets to S2A.") ++ p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) ++ } ++ default: ++ return errors.New("unknown handshake message type") ++ } ++ } ++ return nil ++} ++ ++func buildKeyUpdateRequest() []byte { ++ b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize) ++ b[0] = tlsHandshakeKeyUpdateType ++ b[1] = 0 ++ b[2] = 0 ++ b[3] = tlsHandshakeKeyUpdateMsgSize ++ b[4] = byte(updateNotRequested) ++ return b ++} ++ ++// handleKeyUpdateMsg handles a key update message. ++func (p *conn) handleKeyUpdateMsg(msg []byte) error { ++ keyUpdateRequest := msg[0] ++ if keyUpdateRequest != byte(updateNotRequested) && ++ keyUpdateRequest != byte(updateRequested) { ++ return errors.New("invalid handshake key update message") ++ } ++ if err := p.inConn.UpdateKey(); err != nil { ++ return err ++ } ++ // Send a key update message back to the peer if requested. ++ if keyUpdateRequest == byte(updateRequested) { ++ p.writeMutex.Lock() ++ defer p.writeMutex.Unlock() ++ n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake)) ++ if err != nil { ++ return err ++ } ++ if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize { ++ return errors.New("key update request message wrote less bytes than expected") ++ } ++ if err = p.outConn.UpdateKey(); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// bidEndianInt24 converts the given byte buffer of at least size 3 and ++// outputs the resulting 24 bit integer as a uint32. This is needed because ++// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does ++// not provide a way to transform a byte buffer into a 3 byte integer. ++func bigEndianInt24(b []byte) uint32 { ++ _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 ++ return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 ++} ++ ++func min(a, b int) int { ++ if a < b { ++ return a ++ } ++ return b ++} +diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +new file mode 100644 +index 00000000000..33fa3c55d47 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +@@ -0,0 +1,176 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package record ++ ++import ( ++ "context" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/google/s2a-go/internal/handshaker/service" ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++) ++ ++// sessionTimeout is the timeout for creating a session with the S2A handshaker ++// service. ++const sessionTimeout = time.Second * 5 ++ ++// s2aTicketSender sends session tickets to the S2A handshaker service. ++type s2aTicketSender interface { ++ // sendTicketsToS2A sends the given session tickets to the S2A handshaker ++ // service. ++ sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) ++} ++ ++// ticketStream is the stream used to send and receive session information. ++type ticketStream interface { ++ Send(*s2apb.SessionReq) error ++ Recv() (*s2apb.SessionResp, error) ++} ++ ++type ticketSender struct { ++ // hsAddr stores the address of the S2A handshaker service. ++ hsAddr string ++ // connectionID is the connection identifier that was created and sent by ++ // S2A at the end of a handshake. ++ connectionID uint64 ++ // localIdentity is the local identity that was used by S2A during session ++ // setup and included in the session result. ++ localIdentity *commonpb.Identity ++ // tokenManager manages access tokens for authenticating to S2A. ++ tokenManager tokenmanager.AccessTokenManager ++ // ensureProcessSessionTickets allows users to wait and ensure that all ++ // available session tickets are sent to S2A before a process completes. ++ ensureProcessSessionTickets *sync.WaitGroup ++} ++ ++// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker ++// service. This is done asynchronously and writes to the error logs if an error ++// occurs. ++func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { ++ // Note that the goroutine is in the function rather than at the caller ++ // because the fake ticket sender used for testing must run synchronously ++ // so that the session tickets can be accessed from it after the tests have ++ // been run. 
++ if t.ensureProcessSessionTickets != nil { ++ t.ensureProcessSessionTickets.Add(1) ++ } ++ go func() { ++ if err := func() error { ++ defer func() { ++ if t.ensureProcessSessionTickets != nil { ++ t.ensureProcessSessionTickets.Done() ++ } ++ }() ++ hsConn, err := service.Dial(t.hsAddr) ++ if err != nil { ++ return err ++ } ++ client := s2apb.NewS2AServiceClient(hsConn) ++ ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) ++ defer cancel() ++ session, err := client.SetUpSession(ctx) ++ if err != nil { ++ return err ++ } ++ defer func() { ++ if err := session.CloseSend(); err != nil { ++ grpclog.Error(err) ++ } ++ }() ++ return t.writeTicketsToStream(session, sessionTickets) ++ }(); err != nil { ++ grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", ++ t.localIdentity, err) ++ } ++ callComplete <- true ++ close(callComplete) ++ }() ++} ++ ++// writeTicketsToStream writes the given session tickets to the given stream. ++func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { ++ if err := stream.Send( ++ &s2apb.SessionReq{ ++ ReqOneof: &s2apb.SessionReq_ResumptionTicket{ ++ ResumptionTicket: &s2apb.ResumptionTicketReq{ ++ InBytes: sessionTickets, ++ ConnectionId: t.connectionID, ++ LocalIdentity: t.localIdentity, ++ }, ++ }, ++ AuthMechanisms: t.getAuthMechanisms(), ++ }, ++ ); err != nil { ++ return err ++ } ++ sessionResp, err := stream.Recv() ++ if err != nil { ++ return err ++ } ++ if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { ++ return fmt.Errorf("s2a session ticket response had error status: %v, %v", ++ sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) ++ } ++ return nil ++} ++ ++func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { ++ if t.tokenManager == nil { ++ return nil ++ } ++ // First handle the special case when no local identity has been provided ++ // by the application. In this case, an AuthenticationMechanism with no local ++ // identity will be sent. ++ if t.localIdentity == nil { ++ token, err := t.tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ ++ // Next, handle the case where the application (or the S2A) has specified ++ // a local identity. ++ token, err := t.tokenManager.Token(t.localIdentity) ++ if err != nil { ++ grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) ++ return nil ++ } ++ return []*s2apb.AuthenticationMechanism{ ++ { ++ Identity: t.localIdentity, ++ MechanismOneof: &s2apb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +new file mode 100644 +index 00000000000..ec96ba3b6a6 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +@@ -0,0 +1,70 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. 
++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package tokenmanager provides tokens for authenticating to S2A. ++package tokenmanager ++ ++import ( ++ "fmt" ++ "os" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++) ++ ++const ( ++ s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" ++) ++ ++// AccessTokenManager manages tokens for authenticating to S2A. ++type AccessTokenManager interface { ++ // DefaultToken returns a token that an application with no specified local ++ // identity must use to authenticate to S2A. ++ DefaultToken() (token string, err error) ++ // Token returns a token that an application with local identity equal to ++ // identity must use to authenticate to S2A. ++ Token(identity *commonpb.Identity) (token string, err error) ++} ++ ++type singleTokenAccessTokenManager struct { ++ token string ++} ++ ++// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance ++// that will always manage the same token. ++// ++// The token to be managed is read from the s2aAccessTokenEnvironmentVariable ++// environment variable. If this environment variable is not set, then this ++// function returns an error. ++func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { ++ token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) ++ if !variableExists { ++ return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) ++ } ++ return &singleTokenAccessTokenManager{token: token}, nil ++} ++ ++// DefaultToken always returns the token managed by the ++// singleTokenAccessTokenManager. ++func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { ++ return m.token, nil ++} ++ ++// Token always returns the token managed by the singleTokenAccessTokenManager. ++func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { ++ return m.token, nil ++} +diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md +new file mode 100644 +index 00000000000..3806d1e9ccc +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/README.md +@@ -0,0 +1 @@ ++**This directory has the implementation of the S2Av2's gRPC-Go client libraries** +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +new file mode 100644 +index 00000000000..cc811879b53 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +@@ -0,0 +1,122 @@ ++/* ++ * ++ * Copyright 2022 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package certverifier offloads verifications to S2Av2. ++package certverifier ++ ++import ( ++ "crypto/x509" ++ "fmt" ++ ++ "github.com/google/s2a-go/stream" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++ ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and ++// receives a SessionResp. ++func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ // Offload verification to S2Av2. ++ if grpclog.V(1) { ++ grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") ++ } ++ if err := s2AStream.Send(&s2av2pb.SessionReq{ ++ ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ ++ ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ ++ Mode: verificationMode, ++ PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ ++ ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ ++ CertificateChain: rawCerts, ++ }, ++ }, ++ }, ++ }, ++ }); err != nil { ++ grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") ++ return err ++ } ++ ++ // Get the response from S2Av2. ++ resp, err := s2AStream.Recv() ++ if err != nil { ++ grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") ++ return err ++ } ++ ++ // Parse the response. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ ++ } ++ ++ if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { ++ return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) ++ } ++ ++ return nil ++ } ++} ++ ++// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and ++// receives a SessionResp. ++func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { ++ // Offload verification to S2Av2. ++ if grpclog.V(1) { ++ grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") ++ } ++ if err := s2AStream.Send(&s2av2pb.SessionReq{ ++ ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ ++ ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ ++ Mode: verificationMode, ++ PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ ++ ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ ++ CertificateChain: rawCerts, ++ ServerHostname: hostname, ++ SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, ++ }, ++ }, ++ }, ++ }, ++ }); err != nil { ++ grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") ++ return err ++ } ++ ++ // Get the response from S2Av2. 
++ resp, err := s2AStream.Recv() ++ if err != nil { ++ grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") ++ return err ++ } ++ ++ // Parse the response. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ } ++ ++ if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { ++ return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) ++ } ++ ++ return nil ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..958f3cfaddf3645fa6c0578b5b6955d65ac4c172 +GIT binary patch +literal 998 +zcmXqLVt!=M#B^!_GZP~dlZfxTkgMw#Mx5CteAhsdS=K*t&xAY!UN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Gec7&Lt`^z^C)p%6J+ina%mHz5^^vyvNA9?G4eAQ +zG%<29H8CnfQWlH+MyjTjz +z2Bjk+Ub9TzT(Z$!`+w1f9a{SYn?h^$U$mDJKK^KOurlxGlhf8coXvjW^}bgt-rFjD +znHJ|Xo9Wew;}%JlW_Kjsthc+chb^XXpU3LOdy_KCS4OTntgUFd?YsGkV{8gPCrIUU +z7xZ1<|6Oe5g44&3J$ODp>G5YKW=00a#V!U;2J*mEAgjzGVIbBZvce&_^W7!+r|T*d +zavIkkZ(n>dQ`LY6q(GR3)qt6i@xOr}h$qOxWx&zImXe>Fn2DZTf#J-^uui3sYst!# +z{ImS~I{#0uS#WDz%!i-3yIxOGkACKhN$DyM2L{LbrkL@xJCpY3crHP)J~LHEx;nsZlX$z1Na++S~*x&F8L +zSHsIX`Pf;-7fz4f$yxWfRV}c$Te2$X#22gkrUlwjtNp9{IiD}~zjo}z)1J=iJoj~z +zPFk$n)~eK8B%5+0@w1g$N8H(l+-H4!LOMmyS_+%b`$^TD`}g=z?!9=Gts-l=GE2@y +HTP6bly#{pd + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..d2817641bafb022339926786ab85b545f40ac665 +GIT binary patch +literal 1147 +zcmXqLVktLhVvb+H%*4pVB*OPMYW498DSV5WgVNVW|Jr8oCgimNFB_*;n@8JsUPeZ4 +zRtAH{c0+ChPB!LH7B*p~&|nycgNMo4(NM@h03^fC!x>zfmseSqn3HNKV890w6yxnyM_06xZQcZ?W@M4y!->Y40>Qzg1y0 +z&gNQpqF{|c>D+ZZ(rFIKEL_n{%!~|-iyLnkG+s4m+z3nvvdS!tD-9Z#Eo|&v(%3b4 +zVzNoZ_g%=<^$R1;>=M3fAjvH2AGv2jo&gWY$-*qG2F#3%{|y8|JV6#N1CA!Pl>FSp +z%sk}C2j(GQ()!XwH^ngbB~t_{rh@w&h5|n>_#Tt3odjy +zuPCdY_u)fF^#8=jc^^+Q{aqN5`$@Gm({I-q6VU_LtQOD6naL>j(`V|D&;QI6%^$Dc +z@$uH$=blqSKTV1FFaEpmVP=>@vvi*Y`l|mqO|ORNA0oIS8nOI +z?NiA8%5=ljL@D29-OKpueEYz|zv5rHb554&taa#n7142tpUKWU?R-O_r^0)6j>G3Y +aOB;Xxy<$H9NT-Uzt(mu6i*GEf3IzZ)c%0q< + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..d8c3710c85f9ff41ddfc709924c866350a727a4f +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZa+?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg +zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW +z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N +zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
+zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ +zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic +z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 +zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C +z&5N-8^H(H$?psgMz!}^;s& +z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ +zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) +zTR0VF*I2Sr +HHQW{e5x{wf + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..dae619c097512f20d09d2054c63fc0f715d7be24 +GIT binary patch +literal 998 +zcmXqLVt!=M#B^!_GZP~dlZZk6mxvepr#-!NVoqOL*`yn*LOZ7#@Un4gwRyCC=VfH% +zW@RvFY&YaK;ACSCWnmL$3Jr#FICz+x9SwyH1VA$EJeHTH8JZfJT11KSnjmurkxQExm5_sxk(GhDiIJbd +zpox)-sfm%1VO{t`EzVyWX$3+}`kJnrGQY>^=jnZyz4d%;|IQeuB_Gcfo1a{_>B5Fg +zXYbI=4K*7QZdjc*51-)~EjRzyir2SPA5Z@zcTLeXV*g^*2f0P(7p>c +z?c<>#uh>H(ryKBq6bQ4h8Za|5{x=W=@dR183^XGxLzc8JJvw;mpWjp+5Pu +z!*lq#5Sb^i@`a>klJNjzx%iu)2*?h3yy-gEL(;p^g~mZo#n!wzh9Qua5C+wfRd +ztarh_l9E{Oro7&WNVTl@3-~vy&EDU;)-f|r-8<&mQ>C=;``&D2+5V0(pW(g3o1}Sj +z^gY793Mi&+ntK1`1tm%ENt=U{rb%k1PtbRHEhYTylqR?8HqOeuvA1fBY>pc~NUFTR +KZE!sDzAgX``fm0B + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..ce7f8d31d6802c7e68c188af8797c3a063894857 +GIT binary patch +literal 1147 +zcmXqLVktLhVvb+H%*4pVB%&$)xidhT`$ARBi)~L`p2_`@+!*`b5acj4ER7|Ts-W~ +z`MCv&d6kBO2K*oqZXOPo%(Bel%=|nTUzhK>3dC_Z(le%bCn$9moio9jI@8{ +zd;5%c{XT!(ig$;5K+$bpwI6ChVmse+UV1r`!RY5*U#A%ppFJxS>PX#w$sz7*X_R~A +zg!|$g&z66XI$)aW-TrxDio@fp!U^sS#+*GdHMB|H?BTf!^g~J_=D$Y-Ot{~xpTg~G+{k6 +Zdr{DieczoQDL-a%Ib3(Tsv+bGCjf+Ap-TV& + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32 +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ +z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) +z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ +zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J +zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ +zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ +zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk +zCap+`USuL4V}qf=6kBV_EFAN +zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg +zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW +z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N +zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
+zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ +zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic +z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 +zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C +z&5N-8^H(H$?psgMz!}^;s& +z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ +zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) +zTR0VF*I2Sr +HHQW{e5x{wf + +literal 0 +HcmV?d00001 + +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +new file mode 100644 +index 00000000000..493a5a26481 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 ++a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 ++OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 ++RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK ++P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 ++HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu ++0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 ++EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 ++/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA ++QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ ++nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD ++X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco ++pKklVz0= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +new file mode 100644 +index 00000000000..55a7f10c742 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF ++l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj +++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G ++4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA ++xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh ++68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ ++/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL ++Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA ++VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 ++9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH ++MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt ++aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq 
++xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx ++2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv ++EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z ++aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq ++udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs ++VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm ++56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT ++GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V ++Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm ++HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q ++BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH ++qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh ++GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der +new file mode 100644 +index 0000000000000000000000000000000000000000..04b0d73600b72f80a03943d41973b279db9e8b32 +GIT binary patch +literal 1013 +zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 +ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# +zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< +zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ +z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) +z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ +zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J +zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ +zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ +zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk +zCap+`USuL4V}qf=6kBV_EFAN +zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta 0 { ++ cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) ++ if cert.PrivateKey == nil { ++ return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") ++ } ++ } ++ ++ minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Create mTLS credentials for client. ++ config := &tls.Config{ ++ VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), ++ ServerName: serverHostname, ++ InsecureSkipVerify: true, // NOLINT ++ ClientSessionCache: nil, ++ SessionTicketsDisabled: true, ++ MinVersion: minVersion, ++ MaxVersion: maxVersion, ++ NextProtos: []string{h2}, ++ } ++ if len(tlsConfig.CertificateChain) > 0 { ++ config.Certificates = []tls.Certificate{cert} ++ } ++ return config, nil ++} ++ ++// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. ++func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { ++ return &tls.Config{ ++ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), ++ }, nil ++} ++ ++// ClientConfig builds a TLS config for a server to establish a secure ++// connection with a client, based on SNI communicated during ClientHello. 
++// Ensures that server presents the correct certificate to establish a TLS ++// connection. ++func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { ++ return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { ++ tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) ++ if err != nil { ++ return nil, err ++ } ++ ++ var cert tls.Certificate ++ for i, v := range tlsConfig.CertificateChain { ++ // Populate Certificates field. ++ block, _ := pem.Decode([]byte(v)) ++ if block == nil { ++ return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") ++ } ++ x509Cert, err := x509.ParseCertificate(block.Bytes) ++ if err != nil { ++ return nil, err ++ } ++ cert.Certificate = append(cert.Certificate, x509Cert.Raw) ++ if i == 0 { ++ cert.Leaf = x509Cert ++ } ++ } ++ ++ cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) ++ if cert.PrivateKey == nil { ++ return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") ++ } ++ ++ minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) ++ if err != nil { ++ return nil, err ++ } ++ ++ clientAuth := getTLSClientAuthType(tlsConfig) ++ ++ var cipherSuites []uint16 ++ cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) ++ ++ // Create mTLS credentials for server. ++ return &tls.Config{ ++ Certificates: []tls.Certificate{cert}, ++ VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), ++ ClientAuth: clientAuth, ++ CipherSuites: cipherSuites, ++ SessionTicketsDisabled: true, ++ MinVersion: minVersion, ++ MaxVersion: maxVersion, ++ NextProtos: []string{h2}, ++ }, nil ++ } ++} ++ ++func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { ++ var tlsGoCipherSuites []uint16 ++ for _, v := range tlsConfigCipherSuites { ++ s := getTLSCipherSuite(v) ++ if s != 0xffff { ++ tlsGoCipherSuites = append(tlsGoCipherSuites, s) ++ } ++ } ++ return tlsGoCipherSuites ++} ++ ++func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { ++ switch tlsCipherSuite { ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: ++ return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: ++ return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: ++ return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: ++ return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: ++ return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ++ case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: ++ return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 ++ default: ++ return 0xffff ++ } ++} ++ ++func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { ++ authMechanisms := getAuthMechanisms(tokenManager, localIdentities) ++ var locID *commonpbv1.Identity ++ if localIdentities != nil { ++ locID = localIdentities[0] ++ } ++ ++ if err := 
s2AStream.Send(&s2av2pb.SessionReq{ ++ LocalIdentity: locID, ++ AuthenticationMechanisms: authMechanisms, ++ ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ ++ GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ ++ ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, ++ Sni: sni, ++ }, ++ }, ++ }); err != nil { ++ return nil, err ++ } ++ ++ resp, err := s2AStream.Recv() ++ if err != nil { ++ return nil, err ++ } ++ ++ // TODO(rmehta19): Add unit test for this if statement. ++ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { ++ return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) ++ } ++ ++ return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil ++} ++ ++func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { ++ var clientAuth tls.ClientAuthType ++ switch x := tlsConfig.RequestClientCertificate; x { ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: ++ clientAuth = tls.NoClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: ++ clientAuth = tls.RequestClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: ++ // This case actually maps to tls.VerifyClientCertIfGiven. However this ++ // mapping triggers normal verification, followed by custom verification, ++ // specified in VerifyPeerCertificate. To bypass normal verification, and ++ // only do custom verification we set clientAuth to RequireAnyClientCert or ++ // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full ++ // discussion. ++ clientAuth = tls.RequireAnyClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: ++ clientAuth = tls.RequireAnyClientCert ++ case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: ++ // This case actually maps to tls.RequireAndVerifyClientCert. However this ++ // mapping triggers normal verification, followed by custom verification, ++ // specified in VerifyPeerCertificate. To bypass normal verification, and ++ // only do custom verification we set clientAuth to RequireAnyClientCert or ++ // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full ++ // discussion. 
++ clientAuth = tls.RequireAnyClientCert ++ default: ++ clientAuth = tls.RequireAnyClientCert ++ } ++ return clientAuth ++} ++ ++func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { ++ if tokenManager == nil { ++ return nil ++ } ++ if len(localIdentities) == 0 { ++ token, err := tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("Unable to get token for empty local identity: %v", err) ++ return nil ++ } ++ return []*s2av2pb.AuthenticationMechanism{ ++ { ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }, ++ } ++ } ++ var authMechanisms []*s2av2pb.AuthenticationMechanism ++ for _, localIdentity := range localIdentities { ++ if localIdentity == nil { ++ token, err := tokenManager.DefaultToken() ++ if err != nil { ++ grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }) ++ } else { ++ token, err := tokenManager.Token(localIdentity) ++ if err != nil { ++ grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) ++ continue ++ } ++ authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ ++ Identity: localIdentity, ++ MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ ++ Token: token, ++ }, ++ }) ++ } ++ } ++ return authMechanisms ++} ++ ++// TODO(rmehta19): refactor switch statements into a helper function. ++func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { ++ // Map S2Av2 TLSVersion to consts defined in tls package. ++ var minVersion uint16 ++ var maxVersion uint16 ++ switch x := tlsConfig.MinTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ minVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ minVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ minVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ minVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) ++ } ++ ++ switch x := tlsConfig.MaxTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ maxVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ maxVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ maxVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ maxVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) ++ } ++ if minVersion > maxVersion { ++ return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") ++ } ++ return minVersion, maxVersion, nil ++} ++ ++func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { ++ // Map S2Av2 TLSVersion to consts defined in tls package. 
++ var minVersion uint16 ++ var maxVersion uint16 ++ switch x := tlsConfig.MinTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ minVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ minVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ minVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ minVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) ++ } ++ ++ switch x := tlsConfig.MaxTlsVersion; x { ++ case commonpb.TLSVersion_TLS_VERSION_1_0: ++ maxVersion = tls.VersionTLS10 ++ case commonpb.TLSVersion_TLS_VERSION_1_1: ++ maxVersion = tls.VersionTLS11 ++ case commonpb.TLSVersion_TLS_VERSION_1_2: ++ maxVersion = tls.VersionTLS12 ++ case commonpb.TLSVersion_TLS_VERSION_1_3: ++ maxVersion = tls.VersionTLS13 ++ default: ++ return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) ++ } ++ if minVersion > maxVersion { ++ return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") ++ } ++ return minVersion, maxVersion, nil ++} +diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go +new file mode 100644 +index 00000000000..1c1349de4af +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a.go +@@ -0,0 +1,412 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package s2a provides the S2A transport credentials used by a gRPC ++// application. ++package s2a ++ ++import ( ++ "context" ++ "crypto/tls" ++ "errors" ++ "fmt" ++ "net" ++ "sync" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "github.com/google/s2a-go/fallback" ++ "github.com/google/s2a-go/internal/handshaker" ++ "github.com/google/s2a-go/internal/handshaker/service" ++ "github.com/google/s2a-go/internal/tokenmanager" ++ "github.com/google/s2a-go/internal/v2" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++const ( ++ s2aSecurityProtocol = "tls" ++ // defaultTimeout specifies the default server handshake timeout. ++ defaultTimeout = 30.0 * time.Second ++) ++ ++// s2aTransportCreds are the transport credentials required for establishing ++// a secure connection using the S2A. They implement the ++// credentials.TransportCredentials interface. ++type s2aTransportCreds struct { ++ info *credentials.ProtocolInfo ++ minTLSVersion commonpb.TLSVersion ++ maxTLSVersion commonpb.TLSVersion ++ // tlsCiphersuites contains the ciphersuites used in the S2A connection. ++ // Note that these are currently unconfigurable. ++ tlsCiphersuites []commonpb.Ciphersuite ++ // localIdentity should only be used by the client. ++ localIdentity *commonpb.Identity ++ // localIdentities should only be used by the server. 
++ localIdentities []*commonpb.Identity ++ // targetIdentities should only be used by the client. ++ targetIdentities []*commonpb.Identity ++ isClient bool ++ s2aAddr string ++ ensureProcessSessionTickets *sync.WaitGroup ++} ++ ++// NewClientCreds returns a client-side transport credentials object that uses ++// the S2A to establish a secure connection with a server. ++func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { ++ if opts == nil { ++ return nil, errors.New("nil client options") ++ } ++ var targetIdentities []*commonpb.Identity ++ for _, targetIdentity := range opts.TargetIdentities { ++ protoTargetIdentity, err := toProtoIdentity(targetIdentity) ++ if err != nil { ++ return nil, err ++ } ++ targetIdentities = append(targetIdentities, protoTargetIdentity) ++ } ++ localIdentity, err := toProtoIdentity(opts.LocalIdentity) ++ if err != nil { ++ return nil, err ++ } ++ if opts.EnableLegacyMode { ++ return &s2aTransportCreds{ ++ info: &credentials.ProtocolInfo{ ++ SecurityProtocol: s2aSecurityProtocol, ++ }, ++ minTLSVersion: commonpb.TLSVersion_TLS1_3, ++ maxTLSVersion: commonpb.TLSVersion_TLS1_3, ++ tlsCiphersuites: []commonpb.Ciphersuite{ ++ commonpb.Ciphersuite_AES_128_GCM_SHA256, ++ commonpb.Ciphersuite_AES_256_GCM_SHA384, ++ commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, ++ }, ++ localIdentity: localIdentity, ++ targetIdentities: targetIdentities, ++ isClient: true, ++ s2aAddr: opts.S2AAddress, ++ ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, ++ }, nil ++ } ++ verificationMode := getVerificationMode(opts.VerificationMode) ++ var fallbackFunc fallback.ClientHandshake ++ if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { ++ fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc ++ } ++ return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) ++} ++ ++// NewServerCreds returns a server-side transport credentials object that uses ++// the S2A to establish a secure connection with a client. ++func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { ++ if opts == nil { ++ return nil, errors.New("nil server options") ++ } ++ var localIdentities []*commonpb.Identity ++ for _, localIdentity := range opts.LocalIdentities { ++ protoLocalIdentity, err := toProtoIdentity(localIdentity) ++ if err != nil { ++ return nil, err ++ } ++ localIdentities = append(localIdentities, protoLocalIdentity) ++ } ++ if opts.EnableLegacyMode { ++ return &s2aTransportCreds{ ++ info: &credentials.ProtocolInfo{ ++ SecurityProtocol: s2aSecurityProtocol, ++ }, ++ minTLSVersion: commonpb.TLSVersion_TLS1_3, ++ maxTLSVersion: commonpb.TLSVersion_TLS1_3, ++ tlsCiphersuites: []commonpb.Ciphersuite{ ++ commonpb.Ciphersuite_AES_128_GCM_SHA256, ++ commonpb.Ciphersuite_AES_256_GCM_SHA384, ++ commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, ++ }, ++ localIdentities: localIdentities, ++ isClient: false, ++ s2aAddr: opts.S2AAddress, ++ }, nil ++ } ++ verificationMode := getVerificationMode(opts.VerificationMode) ++ return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) ++} ++ ++// ClientHandshake initiates a client-side TLS handshake using the S2A. 
++func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { ++ if !c.isClient { ++ return nil, nil, errors.New("client handshake called using server transport credentials") ++ } ++ ++ // Connect to the S2A. ++ hsConn, err := service.Dial(c.s2aAddr) ++ if err != nil { ++ grpclog.Infof("Failed to connect to S2A: %v", err) ++ return nil, nil, err ++ } ++ ++ var cancel context.CancelFunc ++ ctx, cancel = context.WithCancel(ctx) ++ defer cancel() ++ ++ opts := &handshaker.ClientHandshakerOptions{ ++ MinTLSVersion: c.minTLSVersion, ++ MaxTLSVersion: c.maxTLSVersion, ++ TLSCiphersuites: c.tlsCiphersuites, ++ TargetIdentities: c.targetIdentities, ++ LocalIdentity: c.localIdentity, ++ TargetName: serverAuthority, ++ EnsureProcessSessionTickets: c.ensureProcessSessionTickets, ++ } ++ chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) ++ if err != nil { ++ grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) ++ return nil, nil, err ++ } ++ defer func() { ++ if err != nil { ++ if closeErr := chs.Close(); closeErr != nil { ++ grpclog.Infof("Close failed unexpectedly: %v", err) ++ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) ++ } ++ } ++ }() ++ ++ secConn, authInfo, err := chs.ClientHandshake(context.Background()) ++ if err != nil { ++ grpclog.Infof("Handshake failed: %v", err) ++ return nil, nil, err ++ } ++ return secConn, authInfo, nil ++} ++ ++// ServerHandshake initiates a server-side TLS handshake using the S2A. ++func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { ++ if c.isClient { ++ return nil, nil, errors.New("server handshake called using client transport credentials") ++ } ++ ++ // Connect to the S2A. 
++ hsConn, err := service.Dial(c.s2aAddr) ++ if err != nil { ++ grpclog.Infof("Failed to connect to S2A: %v", err) ++ return nil, nil, err ++ } ++ ++ ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) ++ defer cancel() ++ ++ opts := &handshaker.ServerHandshakerOptions{ ++ MinTLSVersion: c.minTLSVersion, ++ MaxTLSVersion: c.maxTLSVersion, ++ TLSCiphersuites: c.tlsCiphersuites, ++ LocalIdentities: c.localIdentities, ++ } ++ shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) ++ if err != nil { ++ grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) ++ return nil, nil, err ++ } ++ defer func() { ++ if err != nil { ++ if closeErr := shs.Close(); closeErr != nil { ++ grpclog.Infof("Close failed unexpectedly: %v", err) ++ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) ++ } ++ } ++ }() ++ ++ secConn, authInfo, err := shs.ServerHandshake(context.Background()) ++ if err != nil { ++ grpclog.Infof("Handshake failed: %v", err) ++ return nil, nil, err ++ } ++ return secConn, authInfo, nil ++} ++ ++func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { ++ return *c.info ++} ++ ++func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { ++ info := *c.info ++ var localIdentity *commonpb.Identity ++ if c.localIdentity != nil { ++ localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) ++ } ++ var localIdentities []*commonpb.Identity ++ if c.localIdentities != nil { ++ localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) ++ for i, localIdentity := range c.localIdentities { ++ localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) ++ } ++ } ++ var targetIdentities []*commonpb.Identity ++ if c.targetIdentities != nil { ++ targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) ++ for i, targetIdentity := range c.targetIdentities { ++ targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) ++ } ++ } ++ return &s2aTransportCreds{ ++ info: &info, ++ minTLSVersion: c.minTLSVersion, ++ maxTLSVersion: c.maxTLSVersion, ++ tlsCiphersuites: c.tlsCiphersuites, ++ localIdentity: localIdentity, ++ localIdentities: localIdentities, ++ targetIdentities: targetIdentities, ++ isClient: c.isClient, ++ s2aAddr: c.s2aAddr, ++ } ++} ++ ++func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { ++ c.info.ServerName = serverNameOverride ++ return nil ++} ++ ++// TLSClientConfigOptions specifies parameters for creating client TLS config. ++type TLSClientConfigOptions struct { ++ // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. ++ // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ ++ // ServerName: "example.com", ++ // }) ++ ServerName string ++} ++ ++// TLSClientConfigFactory defines the interface for a client TLS config factory. ++type TLSClientConfigFactory interface { ++ Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) ++} ++ ++// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. 
++func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { ++ if opts == nil { ++ return nil, fmt.Errorf("opts must be non-nil") ++ } ++ if opts.EnableLegacyMode { ++ return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") ++ } ++ tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() ++ if err != nil { ++ // The only possible error is: access token not set in the environment, ++ // which is okay in environments other than serverless. ++ grpclog.Infof("Access token manager not initialized: %v", err) ++ return &s2aTLSClientConfigFactory{ ++ s2av2Address: opts.S2AAddress, ++ tokenManager: nil, ++ verificationMode: getVerificationMode(opts.VerificationMode), ++ serverAuthorizationPolicy: opts.serverAuthorizationPolicy, ++ }, nil ++ } ++ return &s2aTLSClientConfigFactory{ ++ s2av2Address: opts.S2AAddress, ++ tokenManager: tokenManager, ++ verificationMode: getVerificationMode(opts.VerificationMode), ++ serverAuthorizationPolicy: opts.serverAuthorizationPolicy, ++ }, nil ++} ++ ++type s2aTLSClientConfigFactory struct { ++ s2av2Address string ++ tokenManager tokenmanager.AccessTokenManager ++ verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode ++ serverAuthorizationPolicy []byte ++} ++ ++func (f *s2aTLSClientConfigFactory) Build( ++ ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { ++ serverName := "" ++ if opts != nil && opts.ServerName != "" { ++ serverName = opts.ServerName ++ } ++ return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) ++} ++ ++func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { ++ switch verificationMode { ++ case ConnectToGoogle: ++ return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ++ case Spiffe: ++ return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE ++ default: ++ return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED ++ } ++} ++ ++// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
++// Example use with http.RoundTripper: ++// ++// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ ++// S2AAddress: s2aAddress, // required ++// }) ++// transport := http.DefaultTransport ++// transport.DialTLSContext = dialTLSContext ++func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { ++ ++ return func(ctx context.Context, network, addr string) (net.Conn, error) { ++ ++ fallback := func(err error) (net.Conn, error) { ++ if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && ++ opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { ++ fbDialer := opts.FallbackOpts.FallbackDialer ++ grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) ++ fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) ++ if fbErr != nil { ++ return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) ++ } ++ return fbConn, nil ++ } ++ return nil, err ++ } ++ ++ factory, err := NewTLSClientConfigFactory(opts) ++ if err != nil { ++ grpclog.Infof("error creating S2A client config factory: %v", err) ++ return fallback(err) ++ } ++ ++ serverName, _, err := net.SplitHostPort(addr) ++ if err != nil { ++ serverName = addr ++ } ++ timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) ++ defer cancel() ++ s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ ++ ServerName: serverName, ++ }) ++ if err != nil { ++ grpclog.Infof("error building S2A TLS config: %v", err) ++ return fallback(err) ++ } ++ ++ s2aDialer := &tls.Dialer{ ++ Config: s2aTLSConfig, ++ } ++ c, err := s2aDialer.DialContext(ctx, network, addr) ++ if err != nil { ++ grpclog.Infof("error dialing with S2A to %s: %v", addr, err) ++ return fallback(err) ++ } ++ grpclog.Infof("success dialing MTLS to %s with S2A", addr) ++ return c, nil ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go +new file mode 100644 +index 00000000000..94feafb9cf8 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a_options.go +@@ -0,0 +1,208 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package s2a ++ ++import ( ++ "context" ++ "crypto/tls" ++ "errors" ++ "sync" ++ ++ "github.com/google/s2a-go/fallback" ++ "github.com/google/s2a-go/stream" ++ ++ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ++) ++ ++// Identity is the interface for S2A identities. ++type Identity interface { ++ // Name returns the name of the identity. ++ Name() string ++} ++ ++type spiffeID struct { ++ spiffeID string ++} ++ ++func (s *spiffeID) Name() string { return s.spiffeID } ++ ++// NewSpiffeID creates a SPIFFE ID from id. 
++func NewSpiffeID(id string) Identity { ++ return &spiffeID{spiffeID: id} ++} ++ ++type hostname struct { ++ hostname string ++} ++ ++func (h *hostname) Name() string { return h.hostname } ++ ++// NewHostname creates a hostname from name. ++func NewHostname(name string) Identity { ++ return &hostname{hostname: name} ++} ++ ++type uid struct { ++ uid string ++} ++ ++func (h *uid) Name() string { return h.uid } ++ ++// NewUID creates a UID from name. ++func NewUID(name string) Identity { ++ return &uid{uid: name} ++} ++ ++// VerificationModeType specifies the mode that S2A must use to verify the peer ++// certificate chain. ++type VerificationModeType int ++ ++// Three types of verification modes. ++const ( ++ Unspecified = iota ++ ConnectToGoogle ++ Spiffe ++) ++ ++// ClientOptions contains the client-side options used to establish a secure ++// channel using the S2A handshaker service. ++type ClientOptions struct { ++ // TargetIdentities contains a list of allowed server identities. One of the ++ // target identities should match the peer identity in the handshake ++ // result; otherwise, the handshake fails. ++ TargetIdentities []Identity ++ // LocalIdentity is the local identity of the client application. If none is ++ // provided, then the S2A will choose the default identity, if one exists. ++ LocalIdentity Identity ++ // S2AAddress is the address of the S2A. ++ S2AAddress string ++ // EnsureProcessSessionTickets waits for all session tickets to be sent to ++ // S2A before a process completes. ++ // ++ // This functionality is crucial for processes that complete very soon after ++ // using S2A to establish a TLS connection, but it can be ignored for longer ++ // lived processes. ++ // ++ // Usage example: ++ // func main() { ++ // var ensureProcessSessionTickets sync.WaitGroup ++ // clientOpts := &s2a.ClientOptions{ ++ // EnsureProcessSessionTickets: &ensureProcessSessionTickets, ++ // // Set other members. ++ // } ++ // creds, _ := s2a.NewClientCreds(clientOpts) ++ // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) ++ // defer conn.Close() ++ // ++ // // Make RPC call. ++ // ++ // // The process terminates right after the RPC call ends. ++ // // ensureProcessSessionTickets can be used to ensure resumption ++ // // tickets are fully processed. If the process is long-lived, using ++ // // ensureProcessSessionTickets is not necessary. ++ // ensureProcessSessionTickets.Wait() ++ // } ++ EnsureProcessSessionTickets *sync.WaitGroup ++ // If true, enables the use of legacy S2Av1. ++ EnableLegacyMode bool ++ // VerificationMode specifies the mode that S2A must use to verify the ++ // peer certificate chain. ++ VerificationMode VerificationModeType ++ ++ // Optional fallback after dialing with S2A fails. ++ FallbackOpts *FallbackOptions ++ ++ // Generates an S2AStream interface for talking to the S2A server. ++ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ++ ++ // Serialized user specified policy for server authorization. ++ serverAuthorizationPolicy []byte ++} ++ ++// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. ++type FallbackOptions struct { ++ // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). ++ // It will be called by ClientHandshake function, after handshake with S2A fails. ++ // s2a.NewClientCreds() ignores the other FallbackDialer field. 
++ FallbackClientHandshakeFunc fallback.ClientHandshake ++ ++ // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). ++ // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. ++ // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. ++ FallbackDialer *FallbackDialer ++} ++ ++// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. ++type FallbackDialer struct { ++ // Dialer specifies a fallback tls.Dialer. ++ Dialer *tls.Dialer ++ // ServerAddr is used by Dialer to establish fallback connection. ++ ServerAddr string ++} ++ ++// DefaultClientOptions returns the default client options. ++func DefaultClientOptions(s2aAddress string) *ClientOptions { ++ return &ClientOptions{ ++ S2AAddress: s2aAddress, ++ VerificationMode: ConnectToGoogle, ++ } ++} ++ ++// ServerOptions contains the server-side options used to establish a secure ++// channel using the S2A handshaker service. ++type ServerOptions struct { ++ // LocalIdentities is the list of local identities that may be assumed by ++ // the server. If no local identity is specified, then the S2A chooses a ++ // default local identity, if one exists. ++ LocalIdentities []Identity ++ // S2AAddress is the address of the S2A. ++ S2AAddress string ++ // If true, enables the use of legacy S2Av1. ++ EnableLegacyMode bool ++ // VerificationMode specifies the mode that S2A must use to verify the ++ // peer certificate chain. ++ VerificationMode VerificationModeType ++ ++ // Generates an S2AStream interface for talking to the S2A server. ++ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ++} ++ ++// DefaultServerOptions returns the default server options. ++func DefaultServerOptions(s2aAddress string) *ServerOptions { ++ return &ServerOptions{ ++ S2AAddress: s2aAddress, ++ VerificationMode: ConnectToGoogle, ++ } ++} ++ ++func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { ++ if identity == nil { ++ return nil, nil ++ } ++ switch id := identity.(type) { ++ case *spiffeID: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil ++ case *hostname: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil ++ case *uid: ++ return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil ++ default: ++ return nil, errors.New("unrecognized identity type") ++ } ++} +diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go +new file mode 100644 +index 00000000000..d649cc46148 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/s2a_utils.go +@@ -0,0 +1,79 @@ ++/* ++ * ++ * Copyright 2021 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ * ++ */ ++ ++package s2a ++ ++import ( ++ "context" ++ "errors" ++ ++ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" ++ "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/peer" ++) ++ ++// AuthInfo exposes security information from the S2A to the application. ++type AuthInfo interface { ++ // AuthType returns the authentication type. ++ AuthType() string ++ // ApplicationProtocol returns the application protocol, e.g. "grpc". ++ ApplicationProtocol() string ++ // TLSVersion returns the TLS version negotiated during the handshake. ++ TLSVersion() commonpb.TLSVersion ++ // Ciphersuite returns the ciphersuite negotiated during the handshake. ++ Ciphersuite() commonpb.Ciphersuite ++ // PeerIdentity returns the authenticated identity of the peer. ++ PeerIdentity() *commonpb.Identity ++ // LocalIdentity returns the local identity of the application used during ++ // session setup. ++ LocalIdentity() *commonpb.Identity ++ // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in ++ // the S2A handshake. ++ PeerCertFingerprint() []byte ++ // LocalCertFingerprint returns the SHA256 hash of the local certificate used ++ // in the S2A handshake. ++ LocalCertFingerprint() []byte ++ // IsHandshakeResumed returns true if a cached session was used to resume ++ // the handshake. ++ IsHandshakeResumed() bool ++ // SecurityLevel returns the security level of the connection. ++ SecurityLevel() credentials.SecurityLevel ++} ++ ++// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given ++// peer, if it exists. This API should be used by gRPC clients after ++// obtaining a peer object using the grpc.Peer() CallOption. ++func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { ++ s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) ++ if !ok { ++ return nil, errors.New("no S2AAuthInfo found in Peer") ++ } ++ return s2aAuthInfo, nil ++} ++ ++// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given ++// context, if it exists. This API should be used by gRPC server RPC handlers ++// to get information about the peer. On the client-side, use the grpc.Peer() ++// CallOption and the AuthInfoFromPeer function. ++func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { ++ p, ok := peer.FromContext(ctx) ++ if !ok { ++ return nil, errors.New("no Peer found in Context") ++ } ++ return AuthInfoFromPeer(p) ++} +diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go +new file mode 100644 +index 00000000000..584bf32b1c7 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go +@@ -0,0 +1,34 @@ ++/* ++ * ++ * Copyright 2023 Google LLC ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++// Package stream provides an interface for bidirectional streaming to the S2A server. 
++package stream ++ ++import ( ++ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ++) ++ ++// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. ++type S2AStream interface { ++ // Send sends the message to the S2A server. ++ Send(*s2av2pb.SessionReq) error ++ // Recv receives the message from the S2A server. ++ Recv() (*s2av2pb.SessionResp, error) ++ // Closes the channel to the S2A server. ++ CloseSend() error ++} +diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem +new file mode 100644 +index 00000000000..493a5a26481 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/client_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 ++a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 ++OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 ++RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK ++P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 ++HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu ++0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 ++EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 ++/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA ++QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ ++nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD ++X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco ++pKklVz0= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem +new file mode 100644 +index 00000000000..55a7f10c742 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/client_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF ++l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj +++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G ++4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA ++xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh ++68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ ++/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL ++Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA ++VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 ++9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH ++MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt ++aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq ++xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx ++2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv 
++EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z ++aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq ++udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs ++VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm ++56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT ++GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V ++Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm ++HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q ++BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH ++qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh ++GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem +new file mode 100644 +index 00000000000..0f98322c724 +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/server_cert.pem +@@ -0,0 +1,24 @@ ++-----BEGIN CERTIFICATE----- ++MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL ++BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 ++YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE ++AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN ++MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ ++BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx ++ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ ++KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC ++AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT ++fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ ++qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE ++xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es ++Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 ++Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM ++ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX ++MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR ++e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X ++POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl ++AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg ++odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ ++PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN ++Dhm6uZM= ++-----END CERTIFICATE----- +diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem +new file mode 100644 +index 00000000000..81afea783df +--- /dev/null ++++ b/vendor/github.com/google/s2a-go/testdata/server_key.pem +@@ -0,0 +1,27 @@ ++-----BEGIN RSA PRIVATE KEY----- ++MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs ++8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO ++QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk ++XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA ++Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc ++gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf ++LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl ++jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 ++4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q ++Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P 
++nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 ++drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE ++duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 ++L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG ++06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm ++eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD ++uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 ++lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL ++a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb ++hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ ++7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j ++r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 ++eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD ++B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz ++7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ++-----END RSA PRIVATE KEY----- +diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml +deleted file mode 100644 +index d8156a60ba9..00000000000 +--- a/vendor/github.com/google/uuid/.travis.yml ++++ /dev/null +@@ -1,9 +0,0 @@ +-language: go +- +-go: +- - 1.4.3 +- - 1.5.3 +- - tip +- +-script: +- - go test -v ./... +diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md +new file mode 100644 +index 00000000000..2bd78667afb +--- /dev/null ++++ b/vendor/github.com/google/uuid/CHANGELOG.md +@@ -0,0 +1,10 @@ ++# Changelog ++ ++## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) ++ ++ ++### Bug Fixes ++ ++* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) ++ ++## Changelog +diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md +index 04fdf09f136..5566888726d 100644 +--- a/vendor/github.com/google/uuid/CONTRIBUTING.md ++++ b/vendor/github.com/google/uuid/CONTRIBUTING.md +@@ -2,6 +2,22 @@ + + We definitely welcome patches and contribution to this project! + ++### Tips ++ ++Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). ++ ++Always try to include a test case! If it is not possible or not necessary, ++please explain why in the pull request description. ++ ++### Releasing ++ ++Commits that would precipitate a SemVer change, as desrcibed in the Conventional ++Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) ++to create a release candidate pull request. Once submitted, `release-please` ++will create a release. ++ ++For tips on how to work with `release-please`, see its documentation. 
++ + ### Legal requirements + + In order to protect both you and ourselves, you will need to sign the +diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md +index f765a46f915..3e9a61889de 100644 +--- a/vendor/github.com/google/uuid/README.md ++++ b/vendor/github.com/google/uuid/README.md +@@ -1,6 +1,6 @@ +-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) ++# uuid + The uuid package generates and inspects UUIDs based on +-[RFC 4122](http://tools.ietf.org/html/rfc4122) ++[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) + and DCE 1.1: Authentication and Security Services. + + This package is based on the github.com/pborman/uuid package (previously named +@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this + change is the ability to represent an invalid UUID (vs a NIL UUID). + + ###### Install +-`go get github.com/google/uuid` ++```sh ++go get github.com/google/uuid ++``` + + ###### Documentation +-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) ++[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + + Full `go doc` style documentation for the package can be viewed online without + installing this package by using the GoDoc site here: +diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go +index 24b78edc907..b2a0bc8711b 100644 +--- a/vendor/github.com/google/uuid/node_js.go ++++ b/vendor/github.com/google/uuid/node_js.go +@@ -7,6 +7,6 @@ + package uuid + + // getHardwareInterface returns nil values for the JS version of the code. +-// This remvoves the "net" dependency, because it is not used in the browser. ++// This removes the "net" dependency, because it is not used in the browser. + // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
+ func getHardwareInterface(name string) (string, []byte) { return "", nil } +diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go +index a57207aeb6f..a56138cc4bd 100644 +--- a/vendor/github.com/google/uuid/uuid.go ++++ b/vendor/github.com/google/uuid/uuid.go +@@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: +- if strings.ToLower(s[:9]) != "urn:uuid:" { ++ if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] +@@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { + 9, 11, + 14, 16, + 19, 21, +- 24, 26, 28, 30, 32, 34} { ++ 24, 26, 28, 30, 32, 34, ++ } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") +@@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { ++ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] +@@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { + 9, 11, + 14, 16, + 19, 21, +- 24, 26, 28, 30, 32, 34} { ++ 24, 26, 28, 30, 32, 34, ++ } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") +diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +index 10295639c5a..91d60a809fa 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json ++++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +@@ -1,3 +1,3 @@ + { +- "v2": "2.7.1" ++ "v2": "2.11.0" + } +diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +index 41a7ca94d4d..e17b196f6c7 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md ++++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +@@ -1,5 +1,50 @@ + # Changelog + ++## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) ++ ++ ++### Features ++ ++* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) ++ ++ ++### Bug Fixes ++ ++* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) ++ ++## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) ++ ++ ++### Features ++ ++* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) ++ ++## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) ++ ++ ++### Bug Fixes ++ ++* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) ++ ++## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) ++ ++ ++### Features ++ ++* **apierror:** add method to return HTTP status code 
conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) ++ ++ ++### Documentation ++ ++* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) ++ ++## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) ++ ++ ++### Features ++ ++* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) ++ + ## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +index ed862c8b398..d785a065cab 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go ++++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +@@ -29,6 +29,10 @@ + + // Package apierror implements a wrapper error for parsing error details from + // API calls. Both HTTP & gRPC status errors are supported. ++// ++// For examples of how to use [APIError] with client libraries please reference ++// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) ++// in the client library documentation. + package apierror + + import ( +@@ -345,3 +349,13 @@ func parseHTTPDetails(gae *googleapi.Error) ErrDetails { + + return parseDetails(details) + } ++ ++// HTTPCode returns the underlying HTTP response status code. This method returns ++// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To ++// check gRPC error codes use [google.golang.org/grpc/status.Code]. ++func (a *APIError) HTTPCode() int { ++ if a.httpErr == nil { ++ return -1 ++ } ++ return a.httpErr.Code ++} +diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go +index e092005563b..c52e03f6436 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/call_option.go ++++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go +@@ -218,6 +218,14 @@ func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p + } + ++type timeoutOpt struct { ++ t time.Duration ++} ++ ++func (t timeoutOpt) Resolve(s *CallSettings) { ++ s.timeout = t.t ++} ++ + // WithPath applies a Path override to the HTTP-based APICall. + // + // This is for internal use only. +@@ -230,6 +238,15 @@ func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) + } + ++// WithTimeout is a convenience option for setting a context.WithTimeout on the ++// singular context.Context used for **all** APICall attempts. Calculated from ++// the start of the first APICall attempt. ++// If the context.Context provided to Invoke already has a Deadline set, that ++// will always be respected over the deadline calculated using this option. ++func WithTimeout(t time.Duration) CallOption { ++ return &timeoutOpt{t: t} ++} ++ + // CallSettings allow fine-grained control over how calls are made. + type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. 
+@@ -241,4 +258,8 @@ type CallSettings struct { + + // Path is an HTTP override for an APICall. + Path string ++ ++ // Timeout defines the amount of time that Invoke has to complete. ++ // Unexported so it cannot be changed by the code in an APICall. ++ timeout time.Duration + } +diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go +index 139371a0bf1..6488461f4dc 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/header.go ++++ b/vendor/github.com/googleapis/gax-go/v2/header.go +@@ -29,7 +29,73 @@ + + package gax + +-import "bytes" ++import ( ++ "bytes" ++ "runtime" ++ "strings" ++ "unicode" ++) ++ ++var ( ++ // GoVersion is a header-safe representation of the current runtime ++ // environment's Go version. This is for GAX consumers that need to ++ // report the Go runtime version in API calls. ++ GoVersion string ++ // version is a package internal global variable for testing purposes. ++ version = runtime.Version ++) ++ ++// versionUnknown is only used when the runtime version cannot be determined. ++const versionUnknown = "UNKNOWN" ++ ++func init() { ++ GoVersion = goVersion() ++} ++ ++// goVersion returns a Go runtime version derived from the runtime environment ++// that is modified to be suitable for reporting in a header, meaning it has no ++// whitespace. If it is unable to determine the Go runtime version, it returns ++// versionUnknown. ++func goVersion() string { ++ const develPrefix = "devel +" ++ ++ s := version() ++ if strings.HasPrefix(s, develPrefix) { ++ s = s[len(develPrefix):] ++ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { ++ s = s[:p] ++ } ++ return s ++ } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { ++ s = s[:p] ++ } ++ ++ notSemverRune := func(r rune) bool { ++ return !strings.ContainsRune("0123456789.", r) ++ } ++ ++ if strings.HasPrefix(s, "go1") { ++ s = s[2:] ++ var prerelease string ++ if p := strings.IndexFunc(s, notSemverRune); p >= 0 { ++ s, prerelease = s[:p], s[p:] ++ } ++ if strings.HasSuffix(s, ".") { ++ s += "0" ++ } else if strings.Count(s, ".") < 2 { ++ s += ".0" ++ } ++ if prerelease != "" { ++ // Some release candidates already have a dash in them. ++ if !strings.HasPrefix(prerelease, "-") { ++ prerelease = "-" + prerelease ++ } ++ s += prerelease ++ } ++ return s ++ } ++ return "UNKNOWN" ++} + + // XGoogHeader is for use by the Google Cloud Libraries only. + // +diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go +index 936873ec4f8..374dcdb1151 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go ++++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go +@@ -30,4 +30,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "2.7.1" ++const Version = "2.11.0" +diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go +index 9fcc29959b9..721d1af5517 100644 +--- a/vendor/github.com/googleapis/gax-go/v2/invoke.go ++++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go +@@ -68,6 +68,16 @@ type sleeper func(ctx context.Context, d time.Duration) error + // invoke implements Invoke, taking an additional sleeper argument for testing. + func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer ++ ++ // Only use the value provided via WithTimeout if the context doesn't ++ // already have a deadline. 
This is important for backwards compatibility if ++ // the user already set a deadline on the context given to Invoke. ++ if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { ++ c, cc := context.WithTimeout(ctx, settings.timeout) ++ defer cc() ++ ctx = c ++ } ++ + for { + err := call(ctx, settings) + if err == nil { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go +index 138f7c12f0e..c056bd3058a 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go +@@ -1,10 +1,10 @@ ++//go:build gofuzz + // +build gofuzz + + package httprule + + func Fuzz(data []byte) int { +- _, err := Parse(string(data)) +- if err != nil { ++ if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go +index 5edd784e62a..65ffcf5cf87 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go +@@ -1,6 +1,7 @@ + package httprule + + import ( ++ "errors" + "fmt" + "strings" + ) +@@ -164,9 +165,9 @@ func (p *parser) segment() (segment, error) { + + v, err := p.variable() + if err != nil { +- return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err) ++ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err) + } +- return v, err ++ return v, nil + } + + func (p *parser) literal() (segment, error) { +@@ -191,7 +192,7 @@ func (p *parser) variable() (segment, error) { + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { +- return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err) ++ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err) + } + } else { + segs = []segment{wildcard{}} +@@ -213,12 +214,12 @@ func (p *parser) fieldPath() (string, error) { + } + components := []string{c} + for { +- if _, err = p.accept("."); err != nil { ++ if _, err := p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { +- return "", fmt.Errorf("invalid field path component: %v", err) ++ return "", fmt.Errorf("invalid field path component: %w", err) + } + components = append(components, c) + } +@@ -237,10 +238,8 @@ const ( + typeEOF = termType("$") + ) + +-const ( +- // eof is the terminal symbol which always appears at the end of token sequence. +- eof = "\u0000" +-) ++// eof is the terminal symbol which always appears at the end of token sequence. ++const eof = "\u0000" + + // accept tries to accept a token in "p". + // This function consumes a token and returns it if it matches to the specified "term". +@@ -275,11 +274,12 @@ func (p *parser) accept(term termType) (string, error) { + // expectPChars determines if "t" consists of only pchars defined in RFC3986. + // + // https://www.ietf.org/rfc/rfc3986.txt, P.49 +-// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +-// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +-// sub-delims = "!" 
/ "$" / "&" / "'" / "(" / ")" +-// / "*" / "+" / "," / ";" / "=" +-// pct-encoded = "%" HEXDIG HEXDIG ++// ++// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" ++// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" ++// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" ++// / "*" / "+" / "," / ";" / "=" ++// pct-encoded = "%" HEXDIG HEXDIG + func expectPChars(t string) error { + const ( + init = iota +@@ -333,7 +333,7 @@ func expectPChars(t string) error { + // expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). + func expectIdent(ident string) error { + if ident == "" { +- return fmt.Errorf("empty identifier") ++ return errors.New("empty identifier") + } + for pos, r := range ident { + switch { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +index 95f867a5286..a8789f17022 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +@@ -27,9 +27,9 @@ go_library( + "//internal/httprule", + "//utilities", + "@go_googleapis//google/api:httpbody_go_proto", +- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", ++ "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", +@@ -37,6 +37,8 @@ go_library( + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", ++ "@org_golang_google_protobuf//types/known/fieldmaskpb", ++ "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +@@ -56,8 +58,10 @@ go_test( + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", ++ "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", ++ "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], +@@ -69,8 +73,9 @@ go_test( + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@go_googleapis//google/rpc:status_go_proto", +- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", ++ "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", ++ "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", +@@ -78,6 +83,7 @@ go_test( + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", ++ "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +index fb57b9366ea..31553e7848a 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +@@ -13,6 +13,7 @@ import ( + "time" + + 
"google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + ) +@@ -35,11 +36,15 @@ const metadataHeaderBinarySuffix = "-Bin" + const xForwardedFor = "X-Forwarded-For" + const xForwardedHost = "X-Forwarded-Host" + +-var ( +- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +- // header isn't present. If the value is 0 the sent `context` will not have a timeout. +- DefaultContextTimeout = 0 * time.Second +-) ++// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound ++// header isn't present. If the value is 0 the sent `context` will not have a timeout. ++var DefaultContextTimeout = 0 * time.Second ++ ++// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. ++// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. ++var malformedHTTPHeaders = map[string]struct{}{ ++ "connection": {}, ++} + + type ( + rpcMethodKey struct{} +@@ -95,12 +100,43 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque + return metadata.NewIncomingContext(ctx, md), nil + } + ++func isValidGRPCMetadataKey(key string) bool { ++ // Must be a valid gRPC "Header-Name" as defined here: ++ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md ++ // This means 0-9 a-z _ - . ++ // Only lowercase letters are valid in the wire protocol, but the client library will normalize ++ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. ++ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. ++ for _, ch := range bytes { ++ validLowercaseLetter := ch >= 'a' && ch <= 'z' ++ validUppercaseLetter := ch >= 'A' && ch <= 'Z' ++ validDigit := ch >= '0' && ch <= '9' ++ validOther := ch == '.' || ch == '-' || ch == '_' ++ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { ++ return false ++ } ++ } ++ return true ++} ++ ++func isValidGRPCMetadataTextValue(textValue string) bool { ++ // Must be a valid gRPC "ASCII-Value" as defined here: ++ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md ++ // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. ++ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. 
++ for _, ch := range bytes { ++ if ch < 0x20 || ch > 0x7E { ++ return false ++ } ++ } ++ return true ++} ++ + func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } +- var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error +@@ -109,7 +145,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } +- ++ var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + for _, val := range vals { +@@ -118,6 +154,10 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { ++ if !isValidGRPCMetadataKey(h) { ++ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) ++ continue ++ } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { +@@ -127,6 +167,9 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM + } + + val = string(b) ++ } else if !isValidGRPCMetadataTextValue(val) { ++ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) ++ continue + } + pairs = append(pairs, h, val) + } +@@ -172,11 +215,17 @@ type serverMetadataKey struct{} + + // NewServerMetadataContext creates a new context with ServerMetadata + func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { ++ if ctx == nil { ++ ctx = context.Background() ++ } + return context.WithValue(ctx, serverMetadataKey{}, md) + } + + // ServerMetadataFromContext returns the ServerMetadata in ctx + func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { ++ if ctx == nil { ++ return md, false ++ } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return + } +@@ -269,8 +318,8 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + case 'n': + return time.Nanosecond, true + default: ++ return + } +- return + } + + // isPermanentHTTPHeader checks whether hdr belongs to the list of +@@ -308,6 +357,13 @@ func isPermanentHTTPHeader(hdr string) bool { + return false + } + ++// isMalformedHTTPHeader checks whether header belongs to the list of ++// "malformed headers" and would be rejected by the gRPC server. ++func isMalformedHTTPHeader(header string) bool { ++ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] ++ return isMalformed ++} ++ + // RPCMethod returns the method string for the server context. The returned + // string is in the format of "/package.service/method". 
+ func RPCMethod(ctx context.Context) (string, bool) { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +index e6bc4e6ceec..d7b15fcfb3f 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +@@ -37,7 +37,7 @@ func BoolSlice(val, sep string) ([]bool, error) { + for i, v := range s { + value, err := Bool(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -57,7 +57,7 @@ func Float64Slice(val, sep string) ([]float64, error) { + for i, v := range s { + value, err := Float64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -81,7 +81,7 @@ func Float32Slice(val, sep string) ([]float32, error) { + for i, v := range s { + value, err := Float32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -101,7 +101,7 @@ func Int64Slice(val, sep string) ([]int64, error) { + for i, v := range s { + value, err := Int64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -125,7 +125,7 @@ func Int32Slice(val, sep string) ([]int32, error) { + for i, v := range s { + value, err := Int32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -145,7 +145,7 @@ func Uint64Slice(val, sep string) ([]uint64, error) { + for i, v := range s { + value, err := Uint64(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -169,7 +169,7 @@ func Uint32Slice(val, sep string) ([]uint32, error) { + for i, v := range s { + value, err := Uint32(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -197,7 +197,7 @@ func BytesSlice(val, sep string) ([][]byte, error) { + for i, v := range s { + value, err := Bytes(v) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } +@@ -209,8 +209,7 @@ func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} +- err := unmarshaler.Unmarshal([]byte(val), &r) +- if err != nil { ++ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +@@ -221,8 +220,7 @@ func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} +- err := unmarshaler.Unmarshal([]byte(val), &r) +- if err != nil { ++ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +@@ -257,66 +255,64 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { +- return values, err ++ return nil, err + } + values[i] = value + } + return values, nil + } + +-/* +- Support fot google.protobuf.wrappers on top of primitive types +-*/ ++// Support for google.protobuf.wrappers on top of primitive types + + // StringValue well-known type support as wrapper around string type + func StringValue(val string) (*wrapperspb.StringValue, error) { +- return &wrapperspb.StringValue{Value: val}, nil ++ return wrapperspb.String(val), nil + } + + // FloatValue well-known type support as wrapper around float32 type 
+ func FloatValue(val string) (*wrapperspb.FloatValue, error) { + parsedVal, err := Float32(val) +- return &wrapperspb.FloatValue{Value: parsedVal}, err ++ return wrapperspb.Float(parsedVal), err + } + + // DoubleValue well-known type support as wrapper around float64 type + func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { + parsedVal, err := Float64(val) +- return &wrapperspb.DoubleValue{Value: parsedVal}, err ++ return wrapperspb.Double(parsedVal), err + } + + // BoolValue well-known type support as wrapper around bool type + func BoolValue(val string) (*wrapperspb.BoolValue, error) { + parsedVal, err := Bool(val) +- return &wrapperspb.BoolValue{Value: parsedVal}, err ++ return wrapperspb.Bool(parsedVal), err + } + + // Int32Value well-known type support as wrapper around int32 type + func Int32Value(val string) (*wrapperspb.Int32Value, error) { + parsedVal, err := Int32(val) +- return &wrapperspb.Int32Value{Value: parsedVal}, err ++ return wrapperspb.Int32(parsedVal), err + } + + // UInt32Value well-known type support as wrapper around uint32 type + func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { + parsedVal, err := Uint32(val) +- return &wrapperspb.UInt32Value{Value: parsedVal}, err ++ return wrapperspb.UInt32(parsedVal), err + } + + // Int64Value well-known type support as wrapper around int64 type + func Int64Value(val string) (*wrapperspb.Int64Value, error) { + parsedVal, err := Int64(val) +- return &wrapperspb.Int64Value{Value: parsedVal}, err ++ return wrapperspb.Int64(parsedVal), err + } + + // UInt64Value well-known type support as wrapper around uint64 type + func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { + parsedVal, err := Uint64(val) +- return &wrapperspb.UInt64Value{Value: parsedVal}, err ++ return wrapperspb.UInt64(parsedVal), err + } + + // BytesValue well-known type support as wrapper around bytes[] type + func BytesValue(val string) (*wrapperspb.BytesValue, error) { + parsedVal, err := Bytes(val) +- return &wrapperspb.BytesValue{Value: parsedVal}, err ++ return wrapperspb.Bytes(parsedVal), err + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +index d9e0013c439..d2bcbb7d2a2 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +@@ -38,7 +38,7 @@ func HTTPStatusFromCode(code codes.Code) int { + case codes.OK: + return http.StatusOK + case codes.Canceled: +- return http.StatusRequestTimeout ++ return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: +@@ -70,10 +70,10 @@ func HTTPStatusFromCode(code codes.Code) int { + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError ++ default: ++ grpclog.Infof("Unknown gRPC error code: %v", code) ++ return http.StatusInternalServerError + } +- +- grpclog.Infof("Unknown gRPC error code: %v", code) +- return http.StatusInternalServerError + } + + // HTTPError uses the mux-configured error handler. +@@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + + // DefaultRoutingErrorHandler is our default handler for routing errors. 
+ // By default http error codes mapped on the following error codes: +-// NotFound -> grpc.NotFound +-// StatusBadRequest -> grpc.InvalidArgument +-// MethodNotAllowed -> grpc.Unimplemented +-// Other -> grpc.Internal, method is not expecting to be called for anything else ++// ++// NotFound -> grpc.NotFound ++// StatusBadRequest -> grpc.InvalidArgument ++// MethodNotAllowed -> grpc.Unimplemented ++// Other -> grpc.Internal, method is not expecting to be called for anything else + func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +index 0138ed2f769..a03dd166bd7 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +@@ -2,13 +2,14 @@ package runtime + + import ( + "encoding/json" ++ "errors" + "fmt" + "io" + "sort" + +- "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" ++ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" + ) + + func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { +@@ -44,7 +45,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { +- return nil, fmt.Errorf("JSON structure did not match request type") ++ return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) +@@ -53,7 +54,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + } + + if isDynamicProtoMessage(fd.Message()) { +- for _, p := range buildPathsBlindly(k, v) { ++ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." 
+ newPath +@@ -63,7 +64,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field + continue + } + +- if isProtobufAnyMessage(fd.Message()) { ++ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +index d1e21df4810..945f3a5ebf3 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +@@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal + return + } + if err != nil { +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + +@@ -82,15 +82,15 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal + + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) +- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) ++ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } +- if _, err = w.Write(buf); err != nil { ++ if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true +- if _, err = w.Write(delimiter); err != nil { ++ if _, err := w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } +@@ -200,20 +200,24 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re + return nil + } + +-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { ++func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } +- buf, merr := marshaler.Marshal(msg) +- if merr != nil { +- grpclog.Infof("Failed to marshal an error: %v", merr) ++ buf, err := marshaler.Marshal(msg) ++ if err != nil { ++ grpclog.Infof("Failed to marshal an error: %v", err) ++ return ++ } ++ if _, err := w.Write(buf); err != nil { ++ grpclog.Infof("Failed to notify error to client: %v", err) + return + } +- if _, werr := w.Write(buf); werr != nil { +- grpclog.Infof("Failed to notify error to client: %v", werr) ++ if _, err := w.Write(delimiter); err != nil { ++ grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +index 7387c8e3976..51b8247da2a 100644 +--- 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +@@ -92,23 +92,20 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer +- err := buf.WriteByte('[') +- if err != nil { ++ if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { +- err = buf.WriteByte(',') +- if err != nil { ++ if err := buf.WriteByte(','); err != nil { + return nil, err + } + } +- if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { ++ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } +- err = buf.WriteByte(']') +- if err != nil { ++ if err := buf.WriteByte(']'); err != nil { + return nil, err + } + +@@ -117,17 +114,16 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer +- err := buf.WriteByte('[') +- if err != nil { ++ if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { +- err = buf.WriteByte(',') +- if err != nil { ++ if err := buf.WriteByte(','); err != nil { + return nil, err + } + } ++ var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { +@@ -137,8 +133,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + return nil, err + } + } +- err = buf.WriteByte(']') +- if err != nil { ++ if err := buf.WriteByte(']'); err != nil { + return nil, err + } + +@@ -219,8 +214,7 @@ func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v int + + // Decode into bytes for marshalling + var b json.RawMessage +- err := d.Decode(&b) +- if err != nil { ++ if err := d.Decode(&b); err != nil { + return err + } + +@@ -239,8 +233,7 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage +- err := d.Decode(&b) +- if err != nil { ++ if err := d.Decode(&b); err != nil { + return err + } + +@@ -280,6 +273,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions + return nil + } + if rv.Kind() == reflect.Slice { ++ if rv.Type().Elem().Kind() == reflect.Uint8 { ++ var sl []byte ++ if err := d.Decode(&sl); err != nil { ++ return err ++ } ++ if sl != nil { ++ rv.SetBytes(sl) ++ } ++ return nil ++ } ++ + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go +index 007f8f1a2c7..398c780dc22 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go +@@ -1,10 +1,8 @@ + package runtime + + import ( +- "io" +- + "errors" +- "io/ioutil" ++ "io" + + "google.golang.org/protobuf/proto" + ) +@@ -38,7 +36,7 @@ func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + // NewDecoder returns a Decoder which reads proto stream from "reader". 
+ func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { +- buffer, err := ioutil.ReadAll(reader) ++ buffer, err := io.ReadAll(reader) + if err != nil { + return err + } +@@ -53,8 +51,7 @@ func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + if err != nil { + return err + } +- _, err = writer.Write(buffer) +- if err != nil { ++ if _, err := writer.Write(buffer); err != nil { + return err + } + +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +index 46a4aabaf95..f451cb441f4 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +@@ -6,10 +6,13 @@ import ( + "fmt" + "net/http" + "net/textproto" ++ "regexp" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/grpclog" ++ "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +@@ -23,15 +26,15 @@ const ( + // path string before doing any routing. + UnescapingModeLegacy UnescapingMode = iota + +- // EscapingTypeExceptReserved unescapes all path parameters except RFC 6570 ++ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570 + // reserved characters. + UnescapingModeAllExceptReserved + +- // EscapingTypeExceptSlash unescapes URL path parameters except path +- // seperators, which will be left as "%2F". ++ // UnescapingModeAllExceptSlash unescapes URL path parameters except path ++ // separators, which will be left as "%2F". + UnescapingModeAllExceptSlash + +- // URL path parameters will be fully decoded. ++ // UnescapingModeAllCharacters unescapes all URL path parameters. + UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. +@@ -40,6 +43,8 @@ const ( + UnescapingModeDefault = UnescapingModeLegacy + ) + ++var encodedPathSplitter = regexp.MustCompile("(/|%2F)") ++ + // A HandlerFunc handles a specific pair of path pattern and HTTP method. + type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +@@ -75,7 +80,7 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. + } + } + +-// WithEscapingType sets the escaping type. See the definitions of UnescapingMode ++// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode + // for more information. + func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { +@@ -96,13 +101,14 @@ func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMux + type HeaderMatcherFunc func(string) (string, bool) + + // DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. ++// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function. 
++// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'. ++// Other headers are not added to the gRPC metadata. + func DefaultHeaderMatcher(key string) (string, bool) { +- key = textproto.CanonicalMIMEHeaderKey(key) +- if isPermanentHTTPHeader(key) { ++ switch key = textproto.CanonicalMIMEHeaderKey(key); { ++ case isPermanentHTTPHeader(key): + return MetadataPrefix + key, true +- } else if strings.HasPrefix(key, MetadataHeaderPrefix) { ++ case strings.HasPrefix(key, MetadataHeaderPrefix): + return key[len(MetadataHeaderPrefix):], true + } + return "", false +@@ -113,11 +119,30 @@ func DefaultHeaderMatcher(key string) (string, bool) { + // This matcher will be called with each header in http.Request. If matcher returns true, that header will be + // passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. + func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { ++ for _, header := range fn.matchedMalformedHeaders() { ++ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) ++ } ++ + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } + } + ++// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. ++func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string { ++ if fn == nil { ++ return nil ++ } ++ headers := make([]string, 0) ++ for header := range malformedHTTPHeaders { ++ out, accept := fn(header) ++ if accept && isMalformedHTTPHeader(out) { ++ headers = append(headers, out) ++ } ++ } ++ return headers ++} ++ + // WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. + // + // This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +@@ -179,6 +204,56 @@ func WithDisablePathLengthFallback() ServeMuxOption { + } + } + ++// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. ++// When called the handler will forward the request to the upstream grpc service health check (defined in the ++// gRPC Health Checking Protocol). ++// ++// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how ++// to setup the protocol in the grpc server. ++// ++// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. 
++func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { ++ return func(s *ServeMux) { ++ // error can be ignored since pattern is definitely valid ++ _ = s.HandlePath( ++ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ++ ) { ++ _, outboundMarshaler := MarshalerForRequest(s, r) ++ ++ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ ++ Service: r.URL.Query().Get("service"), ++ }) ++ if err != nil { ++ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) ++ return ++ } ++ ++ w.Header().Set("Content-Type", "application/json") ++ ++ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { ++ switch resp.GetStatus() { ++ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: ++ err = status.Error(codes.Unavailable, resp.String()) ++ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: ++ err = status.Error(codes.NotFound, resp.String()) ++ } ++ ++ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) ++ return ++ } ++ ++ _ = outboundMarshaler.NewEncoder(w).Encode(resp) ++ }) ++ } ++} ++ ++// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. ++// ++// See WithHealthEndpointAt for the general implementation. ++func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { ++ return WithHealthEndpointAt(healthCheckClient, "/healthz") ++} ++ + // NewServeMux returns a new ServeMux whose internal mapping is empty. + func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ +@@ -229,7 +304,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er + return nil + } + +-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. ++// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. + func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + +@@ -245,8 +320,6 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path = r.URL.RawPath + } + +- components := strings.Split(path[1:], "/") +- + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { +@@ -257,8 +330,18 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- // Verb out here is to memoize for the fallback case below +- var verb string ++ var pathComponents []string ++ // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" ++ // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the ++ // path is the RawPath (i.e. unescaped). 
That does mean that the behavior of this function will change its default ++ // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved ++ if s.unescapingMode == UnescapingModeAllCharacters { ++ pathComponents = encodedPathSplitter.Split(path[1:], -1) ++ } else { ++ pathComponents = strings.Split(path[1:], "/") ++ } ++ ++ lastPathComponent := pathComponents[len(pathComponents)-1] + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last +@@ -269,23 +352,28 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. ++ ++ var verb string + patVerb := h.pat.Verb() +- l := len(components) +- lastComponent := components[l-1] +- var idx int = -1 +- if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) { +- idx = len(lastComponent) - len(patVerb) - 1 ++ ++ idx := -1 ++ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { ++ idx = len(lastPathComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } ++ ++ comps := make([]string, len(pathComponents)) ++ copy(comps, pathComponents) ++ + if idx > 0 { +- components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:] ++ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + +- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) ++ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { +@@ -301,14 +389,33 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + return + } + +- // lookup other methods to handle fallback from GET to POST and +- // to determine if it is NotImplemented or NotFound. ++ // if no handler has found for the request, lookup for other methods ++ // to handle POST -> GET fallback if the request is subject to path ++ // length fallback. ++ // Note we are not eagerly checking the request here as we want to return the ++ // right HTTP status code, and we need to process the fallback candidates in ++ // order to do that. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { +- pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) ++ var verb string ++ patVerb := h.pat.Verb() ++ ++ idx := -1 ++ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { ++ idx = len(lastPathComponent) - len(patVerb) - 1 ++ } ++ ++ comps := make([]string, len(pathComponents)) ++ copy(comps, pathComponents) ++ ++ if idx > 0 { ++ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] ++ } ++ ++ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { +@@ -320,8 +427,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + continue + } ++ + // X-HTTP-Method-Override is optional. Always allow fallback to POST. +- if s.isPathLengthFallback(r) { ++ // Also, only consider POST -> GET fallbacks, and avoid falling back to ++ // potentially dangerous operations like DELETE. 
++ if s.isPathLengthFallback(r) && m == http.MethodGet { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +index df7cb81426a..8f90d15a562 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +@@ -15,8 +15,6 @@ var ( + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +- // ErrMalformedSequence indicates that an escape sequence was malformed. +- ErrMalformedSequence = errors.New("malformed escape sequence") + ) + + type MalformedSequenceError string +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +index fb0c84ef0cd..d01933c4fd2 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +@@ -1,7 +1,6 @@ + package runtime + + import ( +- "encoding/base64" + "errors" + "fmt" + "net/url" +@@ -11,19 +10,21 @@ import ( + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +- "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc/grpclog" ++ "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" ++ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" ++ "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + ) + + var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +-var currentQueryParser QueryParameterParser = &defaultQueryParser{} ++var currentQueryParser QueryParameterParser = &DefaultQueryParser{} + + // QueryParameterParser defines interface for all query parameter parsers + type QueryParameterParser interface { +@@ -36,14 +37,17 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili + return currentQueryParser.Parse(msg, values, filter) + } + +-type defaultQueryParser struct{} ++// DefaultQueryParser is a QueryParameterParser which implements the default ++// query parameters parsing behavior. ++// ++// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context. ++type DefaultQueryParser struct{} + + // Parse populates "values" into "msg". + // A value is ignored if its key starts with one of the elements in "filter". +-func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { ++func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { +- match := valuesKeyRegexp.FindStringSubmatch(key) +- if len(match) == 3 { ++ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) 
+ } +@@ -175,10 +179,10 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) +- switch { +- case errors.Is(err, protoregistry.NotFound): +- return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) +- case err != nil: ++ if err != nil { ++ if errors.Is(err, protoregistry.NotFound) { ++ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) ++ } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name +@@ -189,8 +193,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number +- v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)) +- if v == nil { ++ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } +@@ -234,7 +237,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: +- v, err := base64.URLEncoding.DecodeString(value) ++ v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } +@@ -250,18 +253,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": +- if value == "null" { +- break +- } + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": +- if value == "null" { +- break +- } + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err +@@ -272,55 +269,67 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.DoubleValue{Value: v} ++ msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.FloatValue{Value: float32(v)} ++ msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.Int64Value{Value: v} ++ msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.Int32Value{Value: int32(v)} ++ msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.UInt64Value{Value: v} ++ msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.UInt32Value{Value: uint32(v)} ++ msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err 
+ } +- msg = &wrapperspb.BoolValue{Value: v} ++ msg = wrapperspb.Bool(v) + case "google.protobuf.StringValue": +- msg = &wrapperspb.StringValue{Value: value} ++ msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": +- v, err := base64.URLEncoding.DecodeString(value) ++ v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } +- msg = &wrapperspb.BytesValue{Value: v} ++ msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm ++ case "google.protobuf.Value": ++ var v structpb.Value ++ if err := protojson.Unmarshal([]byte(value), &v); err != nil { ++ return protoreflect.Value{}, err ++ } ++ msg = &v ++ case "google.protobuf.Struct": ++ var v structpb.Struct ++ if err := protojson.Unmarshal([]byte(value), &v); err != nil { ++ return protoreflect.Value{}, err ++ } ++ msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +index 5d8d12bc421..b8940946577 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +@@ -8,6 +8,7 @@ go_library( + "doc.go", + "pattern.go", + "readerfactory.go", ++ "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +@@ -16,7 +17,10 @@ go_library( + go_test( + name = "utilities_test", + size = "small", +- srcs = ["trie_test.go"], ++ srcs = [ ++ "string_array_flag_test.go", ++ "trie_test.go", ++ ], + deps = [":utilities"], + ) + +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go +index 6dd3854665f..01d26edae3c 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go +@@ -3,13 +3,12 @@ package utilities + import ( + "bytes" + "io" +- "io/ioutil" + ) + + // IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins + // at the start of the stream + func IOReaderFactory(r io.Reader) (func() io.Reader, error) { +- b, err := ioutil.ReadAll(r) ++ b, err := io.ReadAll(r) + if err != nil { + return nil, err + } +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +new file mode 100644 +index 00000000000..d224ab776c0 +--- /dev/null ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +@@ -0,0 +1,33 @@ ++package utilities ++ ++import ( ++ "flag" ++ "strings" ++) ++ ++// flagInterface is an cut down interface to `flag` ++type flagInterface interface { ++ Var(value flag.Value, name string, usage string) ++} ++ ++// StringArrayFlag defines a flag with the specified name and usage string. ++// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag. 
++func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags { ++ value := &StringArrayFlags{} ++ f.Var(value, name, usage) ++ return value ++} ++ ++// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var` ++type StringArrayFlags []string ++ ++// String returns a string representation of `StringArrayFlags` ++func (i *StringArrayFlags) String() string { ++ return strings.Join(*i, ",") ++} ++ ++// Set appends a value to `StringArrayFlags` ++func (i *StringArrayFlags) Set(value string) error { ++ *i = append(*i, value) ++ return nil ++} +diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go +index af3b703d505..dd99b0ed256 100644 +--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go ++++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go +@@ -40,7 +40,7 @@ func NewDoubleArray(seqs [][]string) *DoubleArray { + func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { +- var encoded []int ++ encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go +index 95d8e59da69..b774da88d86 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go +@@ -352,9 +352,9 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + + // Greater asserts that the first element is greater than the second + // +-// assert.Greater(t, 2, 1) +-// assert.Greater(t, float64(2), float64(1)) +-// assert.Greater(t, "b", "a") ++// assert.Greater(t, 2, 1) ++// assert.Greater(t, float64(2), float64(1)) ++// assert.Greater(t, "b", "a") + func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -364,10 +364,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqual(t, 2, 1) +-// assert.GreaterOrEqual(t, 2, 2) +-// assert.GreaterOrEqual(t, "b", "a") +-// assert.GreaterOrEqual(t, "b", "b") ++// assert.GreaterOrEqual(t, 2, 1) ++// assert.GreaterOrEqual(t, 2, 2) ++// assert.GreaterOrEqual(t, "b", "a") ++// assert.GreaterOrEqual(t, "b", "b") + func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -377,9 +377,9 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in + + // Less asserts that the first element is less than the second + // +-// assert.Less(t, 1, 2) +-// assert.Less(t, float64(1), float64(2)) +-// assert.Less(t, "a", "b") ++// assert.Less(t, 1, 2) ++// assert.Less(t, float64(1), float64(2)) ++// assert.Less(t, "a", "b") + func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -389,10 +389,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqual(t, 1, 2) +-// 
assert.LessOrEqual(t, 2, 2) +-// assert.LessOrEqual(t, "a", "b") +-// assert.LessOrEqual(t, "b", "b") ++// assert.LessOrEqual(t, 1, 2) ++// assert.LessOrEqual(t, 2, 2) ++// assert.LessOrEqual(t, "a", "b") ++// assert.LessOrEqual(t, "b", "b") + func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -402,8 +402,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // Positive asserts that the specified element is positive + // +-// assert.Positive(t, 1) +-// assert.Positive(t, 1.23) ++// assert.Positive(t, 1) ++// assert.Positive(t, 1.23) + func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -414,8 +414,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + + // Negative asserts that the specified element is negative + // +-// assert.Negative(t, -1) +-// assert.Negative(t, -1.23) ++// assert.Negative(t, -1) ++// assert.Negative(t, -1.23) + func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go +index 7880b8f9433..84dbd6c790b 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go +@@ -22,9 +22,9 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") ++// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") ++// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") ++// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") + func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -56,7 +56,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Emptyf(t, obj, "error message %s", "formatted") ++// assert.Emptyf(t, obj, "error message %s", "formatted") + func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -66,7 +66,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo + + // Equalf asserts that two objects are equal. + // +-// assert.Equalf(t, 123, 123, "error message %s", "formatted") ++// assert.Equalf(t, 123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -81,8 +81,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar + // EqualErrorf asserts that a function returned an error (i.e. 
not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") + func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -90,10 +90,27 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) + } + ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) ++} ++ + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") ++// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") + func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -103,10 +120,10 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.Errorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Errorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -126,8 +143,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") + func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -147,7 +164,7 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. 
+ // +-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -155,9 +172,34 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) + } + ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) ++} ++ + // Exactlyf asserts that two objects are equal in value and type. + // +-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") ++// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") + func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -183,7 +225,7 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{} + + // Falsef asserts that the specified value is false. 
+ // +-// assert.Falsef(t, myBool, "error message %s", "formatted") ++// assert.Falsef(t, myBool, "error message %s", "formatted") + func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -202,9 +244,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool + + // Greaterf asserts that the first element is greater than the second + // +-// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +-// assert.Greaterf(t, "b", "a", "error message %s", "formatted") ++// assert.Greaterf(t, 2, 1, "error message %s", "formatted") ++// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") ++// assert.Greaterf(t, "b", "a", "error message %s", "formatted") + func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -214,10 +256,10 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") + func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -228,7 +270,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -241,7 +283,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -253,7 +295,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u + + // HTTPErrorf asserts that a specified handler returns an error status code. 
+ // +-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -265,7 +307,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -277,7 +319,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { +@@ -289,7 +331,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -301,7 +343,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -311,7 +353,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms + + // InDeltaf asserts that the two numerals are within delta of each other. 
+ // +-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -353,9 +395,9 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil + + // IsDecreasingf asserts that the collection is decreasing + // +-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -365,9 +407,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsIncreasingf asserts that the collection is increasing + // +-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -377,9 +419,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -389,9 +431,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -409,7 +451,7 @@ func IsTypef(t 
TestingT, expectedType interface{}, object interface{}, msg strin + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -420,7 +462,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") ++// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") + func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -430,9 +472,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf + + // Lessf asserts that the first element is less than the second + // +-// assert.Lessf(t, 1, 2, "error message %s", "formatted") +-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +-// assert.Lessf(t, "a", "b", "error message %s", "formatted") ++// assert.Lessf(t, 1, 2, "error message %s", "formatted") ++// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") ++// assert.Lessf(t, "a", "b", "error message %s", "formatted") + func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -442,10 +484,10 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") + func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -455,8 +497,8 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . + + // Negativef asserts that the specified element is negative + // +-// assert.Negativef(t, -1, "error message %s", "formatted") +-// assert.Negativef(t, -1.23, "error message %s", "formatted") ++// assert.Negativef(t, -1, "error message %s", "formatted") ++// assert.Negativef(t, -1.23, "error message %s", "formatted") + func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -467,7 +509,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. 
+ // +-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -477,7 +519,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. + + // Nilf asserts that the specified object is nil. + // +-// assert.Nilf(t, err, "error message %s", "formatted") ++// assert.Nilf(t, err, "error message %s", "formatted") + func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -496,10 +538,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoErrorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoErrorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -519,9 +561,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) boo + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") + func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -532,9 +574,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -544,7 +586,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) + + // NotEqualf asserts that the specified values are NOT equal. + // +-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). 
+@@ -557,7 +599,7 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") + func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -576,7 +618,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf + + // NotNilf asserts that the specified object is not nil. + // +-// assert.NotNilf(t, err, "error message %s", "formatted") ++// assert.NotNilf(t, err, "error message %s", "formatted") + func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -586,7 +628,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") ++// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") + func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -596,8 +638,8 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") + func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -607,7 +649,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. + + // NotSamef asserts that two pointers do not reference the same object. + // +-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -621,7 +663,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -639,7 +681,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") + func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -651,7 +693,7 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -662,7 +704,7 @@ func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -672,8 +714,8 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str + + // Positivef asserts that the specified element is positive + // +-// assert.Positivef(t, 1, "error message %s", "formatted") +-// assert.Positivef(t, 1.23, "error message %s", "formatted") ++// assert.Positivef(t, 1, "error message %s", "formatted") ++// assert.Positivef(t, 1.23, "error message %s", "formatted") + func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -683,8 +725,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool + + // Regexpf asserts that a specified regexp matches a string. + // +-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") ++// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") + func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -694,7 +736,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in + + // Samef asserts that two pointers reference the same object. + // +-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -708,7 +750,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -718,7 +760,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args + + // Truef asserts that the specified value is true. + // +-// assert.Truef(t, myBool, "error message %s", "formatted") ++// assert.Truef(t, myBool, "error message %s", "formatted") + func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -728,7 +770,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -738,7 +780,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim + + // WithinRangef asserts that a time is within a time range (inclusive). + // +-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go +index 339515b8bfb..b1d94aec53c 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go +@@ -30,9 +30,9 @@ func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{} + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Contains("Hello World", "World") +-// a.Contains(["Hello", "World"], "World") +-// a.Contains({"Hello": "World"}, "Hello") ++// a.Contains("Hello World", "World") ++// a.Contains(["Hello", "World"], "World") ++// a.Contains({"Hello": "World"}, "Hello") + func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -43,9 +43,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. 
+ // +-// a.Containsf("Hello World", "World", "error message %s", "formatted") +-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") ++// a.Containsf("Hello World", "World", "error message %s", "formatted") ++// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") ++// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") + func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -98,7 +98,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Empty(obj) ++// a.Empty(obj) + func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -109,7 +109,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Emptyf(obj, "error message %s", "formatted") ++// a.Emptyf(obj, "error message %s", "formatted") + func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -119,7 +119,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) + + // Equal asserts that two objects are equal. + // +-// a.Equal(123, 123) ++// a.Equal(123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -134,8 +134,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualError(err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// a.EqualError(err, expectedErrorString) + func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -146,8 +146,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") + func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -155,10 +155,44 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a + return EqualErrorf(a.t, theError, errString, msg, args...) + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true ++// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false ++func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValues(a.t, expected, actual, msgAndArgs...) ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EqualExportedValuesf(a.t, expected, actual, msg, args...) ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValues(uint32(123), int32(123)) ++// a.EqualValues(uint32(123), int32(123)) + func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -169,7 +203,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") ++// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") + func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -179,7 +213,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg + + // Equalf asserts that two objects are equal. + // +-// a.Equalf(123, 123, "error message %s", "formatted") ++// a.Equalf(123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -193,10 +227,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Error(err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Error(err) { ++// assert.Equal(t, expectedError, err) ++// } + func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -225,8 +259,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. 
+ // +-// actualObj, err := SomeFunction() +-// a.ErrorContains(err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// a.ErrorContains(err, expectedErrorSubString) + func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -237,8 +271,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") + func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -266,10 +300,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Errorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Errorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -280,7 +314,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) ++// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -288,10 +322,60 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. 
++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithT(func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -301,7 +385,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t + + // Exactly asserts that two objects are equal in value and type. + // +-// a.Exactly(int32(123), int64(123)) ++// a.Exactly(int32(123), int64(123)) + func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -311,7 +395,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg + + // Exactlyf asserts that two objects are equal in value and type. 
+ // +-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") ++// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") + func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -353,7 +437,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ + + // False asserts that the specified value is false. + // +-// a.False(myBool) ++// a.False(myBool) + func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -363,7 +447,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + + // Falsef asserts that the specified value is false. + // +-// a.Falsef(myBool, "error message %s", "formatted") ++// a.Falsef(myBool, "error message %s", "formatted") + func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -391,9 +475,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b + + // Greater asserts that the first element is greater than the second + // +-// a.Greater(2, 1) +-// a.Greater(float64(2), float64(1)) +-// a.Greater("b", "a") ++// a.Greater(2, 1) ++// a.Greater(float64(2), float64(1)) ++// a.Greater("b", "a") + func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -403,10 +487,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqual(2, 1) +-// a.GreaterOrEqual(2, 2) +-// a.GreaterOrEqual("b", "a") +-// a.GreaterOrEqual("b", "b") ++// a.GreaterOrEqual(2, 1) ++// a.GreaterOrEqual(2, 2) ++// a.GreaterOrEqual("b", "a") ++// a.GreaterOrEqual("b", "b") + func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -416,10 +500,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . 
+ + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -429,9 +513,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, + + // Greaterf asserts that the first element is greater than the second + // +-// a.Greaterf(2, 1, "error message %s", "formatted") +-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +-// a.Greaterf("b", "a", "error message %s", "formatted") ++// a.Greaterf(2, 1, "error message %s", "formatted") ++// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") ++// a.Greaterf("b", "a", "error message %s", "formatted") + func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -442,7 +526,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -455,7 +539,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -468,7 +552,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). 
+ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -481,7 +565,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { +@@ -493,7 +577,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin + + // HTTPError asserts that a specified handler returns an error status code. + // +-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -505,7 +589,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -517,7 +601,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str + + // HTTPRedirect asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -529,7 +613,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -541,7 +625,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url + + // HTTPStatusCode asserts that a specified handler returns a specified status code. 
+ // +-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) ++// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { +@@ -553,7 +637,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { +@@ -565,7 +649,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) ++// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -577,7 +661,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { +@@ -589,7 +673,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s + + // Implements asserts that an object is implemented by the specified interface. + // +-// a.Implements((*MyInterface)(nil), new(MyObject)) ++// a.Implements((*MyInterface)(nil), new(MyObject)) + func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -599,7 +683,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -609,7 +693,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} + + // InDelta asserts that the two numerals are within delta of each other. 
+ // +-// a.InDelta(math.Pi, 22/7.0, 0.01) ++// a.InDelta(math.Pi, 22/7.0, 0.01) + func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -651,7 +735,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del + + // InDeltaf asserts that the two numerals are within delta of each other. + // +-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -693,9 +777,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo + + // IsDecreasing asserts that the collection is decreasing + // +-// a.IsDecreasing([]int{2, 1, 0}) +-// a.IsDecreasing([]float{2, 1}) +-// a.IsDecreasing([]string{"b", "a"}) ++// a.IsDecreasing([]int{2, 1, 0}) ++// a.IsDecreasing([]float{2, 1}) ++// a.IsDecreasing([]string{"b", "a"}) + func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -705,9 +789,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) + + // IsDecreasingf asserts that the collection is decreasing + // +-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") ++// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -717,9 +801,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter + + // IsIncreasing asserts that the collection is increasing + // +-// a.IsIncreasing([]int{1, 2, 3}) +-// a.IsIncreasing([]float{1, 2}) +-// a.IsIncreasing([]string{"a", "b"}) ++// a.IsIncreasing([]int{1, 2, 3}) ++// a.IsIncreasing([]float{1, 2}) ++// a.IsIncreasing([]string{"a", "b"}) + func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -729,9 +813,9 @@ func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) + + // IsIncreasingf asserts that the collection is increasing + // +-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") ++// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -741,9 +825,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// a.IsNonDecreasing([]int{1, 1, 2}) +-// a.IsNonDecreasing([]float{1, 2}) +-// 
a.IsNonDecreasing([]string{"a", "b"}) ++// a.IsNonDecreasing([]int{1, 1, 2}) ++// a.IsNonDecreasing([]float{1, 2}) ++// a.IsNonDecreasing([]string{"a", "b"}) + func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -753,9 +837,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -765,9 +849,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in + + // IsNonIncreasing asserts that the collection is not increasing + // +-// a.IsNonIncreasing([]int{2, 1, 1}) +-// a.IsNonIncreasing([]float{2, 1}) +-// a.IsNonIncreasing([]string{"b", "a"}) ++// a.IsNonIncreasing([]int{2, 1, 1}) ++// a.IsNonIncreasing([]float{2, 1}) ++// a.IsNonIncreasing([]string{"b", "a"}) + func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -777,9 +861,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -805,7 +889,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s + + // JSONEq asserts that two JSON strings are equivalent. + // +-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -815,7 +899,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -826,7 +910,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. 
+ // Len asserts that the specified object has specific length. + // Len also fails if the object has a type that len() not accept. + // +-// a.Len(mySlice, 3) ++// a.Len(mySlice, 3) + func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -837,7 +921,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// a.Lenf(mySlice, 3, "error message %s", "formatted") ++// a.Lenf(mySlice, 3, "error message %s", "formatted") + func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -847,9 +931,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in + + // Less asserts that the first element is less than the second + // +-// a.Less(1, 2) +-// a.Less(float64(1), float64(2)) +-// a.Less("a", "b") ++// a.Less(1, 2) ++// a.Less(float64(1), float64(2)) ++// a.Less("a", "b") + func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -859,10 +943,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// a.LessOrEqual(1, 2) +-// a.LessOrEqual(2, 2) +-// a.LessOrEqual("a", "b") +-// a.LessOrEqual("b", "b") ++// a.LessOrEqual(1, 2) ++// a.LessOrEqual(2, 2) ++// a.LessOrEqual("a", "b") ++// a.LessOrEqual("b", "b") + func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -872,10 +956,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// a.LessOrEqualf(1, 2, "error message %s", "formatted") +-// a.LessOrEqualf(2, 2, "error message %s", "formatted") +-// a.LessOrEqualf("a", "b", "error message %s", "formatted") +-// a.LessOrEqualf("b", "b", "error message %s", "formatted") ++// a.LessOrEqualf(1, 2, "error message %s", "formatted") ++// a.LessOrEqualf(2, 2, "error message %s", "formatted") ++// a.LessOrEqualf("a", "b", "error message %s", "formatted") ++// a.LessOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -885,9 +969,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar + + // Lessf asserts that the first element is less than the second + // +-// a.Lessf(1, 2, "error message %s", "formatted") +-// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +-// a.Lessf("a", "b", "error message %s", "formatted") ++// a.Lessf(1, 2, "error message %s", "formatted") ++// a.Lessf(float64(1), float64(2), "error message %s", "formatted") ++// a.Lessf("a", "b", "error message %s", "formatted") + func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -897,8 +981,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i + + // Negative asserts that the specified element is negative + // +-// 
a.Negative(-1) +-// a.Negative(-1.23) ++// a.Negative(-1) ++// a.Negative(-1.23) + func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -908,8 +992,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + + // Negativef asserts that the specified element is negative + // +-// a.Negativef(-1, "error message %s", "formatted") +-// a.Negativef(-1.23, "error message %s", "formatted") ++// a.Negativef(-1, "error message %s", "formatted") ++// a.Negativef(-1.23, "error message %s", "formatted") + func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -920,7 +1004,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) b + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) ++// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -931,7 +1015,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -941,7 +1025,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t + + // Nil asserts that the specified object is nil. + // +-// a.Nil(err) ++// a.Nil(err) + func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -951,7 +1035,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + + // Nilf asserts that the specified object is nil. + // +-// a.Nilf(err, "error message %s", "formatted") ++// a.Nilf(err, "error message %s", "formatted") + func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -979,10 +1063,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoError(err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoError(err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -992,10 +1076,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if a.NoErrorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoErrorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1024,9 +1108,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContains("Hello World", "Earth") +-// a.NotContains(["Hello", "World"], "Earth") +-// a.NotContains({"Hello": "World"}, "Earth") ++// a.NotContains("Hello World", "Earth") ++// a.NotContains(["Hello", "World"], "Earth") ++// a.NotContains({"Hello": "World"}, "Earth") + func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1037,9 +1121,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") ++// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") ++// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") ++// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") + func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1050,9 +1134,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmpty(obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmpty(obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1063,9 +1147,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmptyf(obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmptyf(obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1075,7 +1159,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface + + // NotEqual asserts that the specified values are NOT equal. + // +-// a.NotEqual(obj1, obj2) ++// a.NotEqual(obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). 
+@@ -1088,7 +1172,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValues(obj1, obj2) ++// a.NotEqualValues(obj1, obj2) + func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1098,7 +1182,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") + func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1108,7 +1192,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m + + // NotEqualf asserts that the specified values are NOT equal. + // +-// a.NotEqualf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualf(obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1139,7 +1223,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in + + // NotNil asserts that the specified object is not nil. + // +-// a.NotNil(err) ++// a.NotNil(err) + func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1149,7 +1233,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool + + // NotNilf asserts that the specified object is not nil. + // +-// a.NotNilf(err, "error message %s", "formatted") ++// a.NotNilf(err, "error message %s", "formatted") + func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1159,7 +1243,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanics(func(){ RemainCalm() }) ++// a.NotPanics(func(){ RemainCalm() }) + func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1169,7 +1253,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") ++// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") + func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1179,8 +1263,8 @@ func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{} + + // NotRegexp asserts that a specified regexp does not match a string. 
+ // +-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +-// a.NotRegexp("^start", "it's not starting") ++// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") ++// a.NotRegexp("^start", "it's not starting") + func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1190,8 +1274,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") ++// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") + func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1201,7 +1285,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg + + // NotSame asserts that two pointers do not reference the same object. + // +-// a.NotSame(ptr1, ptr2) ++// a.NotSame(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1214,7 +1298,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg + + // NotSamef asserts that two pointers do not reference the same object. + // +-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") ++// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1228,7 +1312,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1239,7 +1323,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1265,7 +1349,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bo + + // Panics asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panics(func(){ GoCrazy() }) ++// a.Panics(func(){ GoCrazy() }) + func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1277,7 +1361,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithError("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithError("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1289,7 +1373,7 @@ func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndAr + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1300,7 +1384,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg str + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1311,7 +1395,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgA + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1321,7 +1405,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") ++// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1331,8 +1415,8 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b + + // Positive asserts that the specified element is positive + // +-// a.Positive(1) +-// a.Positive(1.23) ++// a.Positive(1) ++// a.Positive(1.23) + func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1342,8 +1426,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + + // Positivef asserts that the specified element is positive + // +-// a.Positivef(1, "error message %s", "formatted") +-// a.Positivef(1.23, "error message %s", "formatted") ++// a.Positivef(1, "error message %s", "formatted") ++// a.Positivef(1.23, "error message %s", "formatted") + func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1353,8 +1437,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) b + + // Regexp asserts that a specified regexp matches a string. + // +-// a.Regexp(regexp.MustCompile("start"), "it's starting") +-// a.Regexp("start...$", "it's not starting") ++// a.Regexp(regexp.MustCompile("start"), "it's starting") ++// a.Regexp("start...$", "it's not starting") + func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1364,8 +1448,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter + + // Regexpf asserts that a specified regexp matches a string. + // +-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") ++// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") + func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1375,7 +1459,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . + + // Same asserts that two pointers reference the same object. + // +-// a.Same(ptr1, ptr2) ++// a.Same(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1388,7 +1472,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . + + // Samef asserts that two pointers reference the same object. + // +-// a.Samef(ptr1, ptr2, "error message %s", "formatted") ++// a.Samef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1402,7 +1486,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1413,7 +1497,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1423,7 +1507,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a + + // True asserts that the specified value is true. + // +-// a.True(myBool) ++// a.True(myBool) + func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1433,7 +1517,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + + // Truef asserts that the specified value is true. + // +-// a.Truef(myBool, "error message %s", "formatted") ++// a.Truef(myBool, "error message %s", "formatted") + func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1443,7 +1527,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) ++// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) + func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1453,7 +1537,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1463,7 +1547,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1473,7 +1557,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim + + // WithinRangef asserts that a time is within a time range (inclusive). 
+ // +-// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go +index 75944878358..00df62a0599 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go ++++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go +@@ -46,36 +46,36 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT + + // IsIncreasing asserts that the collection is increasing + // +-// assert.IsIncreasing(t, []int{1, 2, 3}) +-// assert.IsIncreasing(t, []float{1, 2}) +-// assert.IsIncreasing(t, []string{"a", "b"}) ++// assert.IsIncreasing(t, []int{1, 2, 3}) ++// assert.IsIncreasing(t, []float{1, 2}) ++// assert.IsIncreasing(t, []string{"a", "b"}) + func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + } + + // IsNonIncreasing asserts that the collection is not increasing + // +-// assert.IsNonIncreasing(t, []int{2, 1, 1}) +-// assert.IsNonIncreasing(t, []float{2, 1}) +-// assert.IsNonIncreasing(t, []string{"b", "a"}) ++// assert.IsNonIncreasing(t, []int{2, 1, 1}) ++// assert.IsNonIncreasing(t, []float{2, 1}) ++// assert.IsNonIncreasing(t, []string{"b", "a"}) + func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + } + + // IsDecreasing asserts that the collection is decreasing + // +-// assert.IsDecreasing(t, []int{2, 1, 0}) +-// assert.IsDecreasing(t, []float{2, 1}) +-// assert.IsDecreasing(t, []string{"b", "a"}) ++// assert.IsDecreasing(t, []int{2, 1, 0}) ++// assert.IsDecreasing(t, []float{2, 1}) ++// assert.IsDecreasing(t, []string{"b", "a"}) + func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + } + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// assert.IsNonDecreasing(t, []int{1, 1, 2}) +-// assert.IsNonDecreasing(t, []float{1, 2}) +-// assert.IsNonDecreasing(t, []string{"a", "b"}) ++// assert.IsNonDecreasing(t, []int{1, 1, 2}) ++// assert.IsNonDecreasing(t, []float{1, 2}) ++// assert.IsNonDecreasing(t, []string{"a", "b"}) + func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
+ } +diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go +index fa1245b1897..a55d1bba926 100644 +--- a/vendor/github.com/stretchr/testify/assert/assertions.go ++++ b/vendor/github.com/stretchr/testify/assert/assertions.go +@@ -8,7 +8,6 @@ import ( + "fmt" + "math" + "os" +- "path/filepath" + "reflect" + "regexp" + "runtime" +@@ -76,6 +75,77 @@ func ObjectsAreEqual(expected, actual interface{}) bool { + return bytes.Equal(exp, act) + } + ++// copyExportedFields iterates downward through nested data structures and creates a copy ++// that only contains the exported struct fields. ++func copyExportedFields(expected interface{}) interface{} { ++ if isNil(expected) { ++ return expected ++ } ++ ++ expectedType := reflect.TypeOf(expected) ++ expectedKind := expectedType.Kind() ++ expectedValue := reflect.ValueOf(expected) ++ ++ switch expectedKind { ++ case reflect.Struct: ++ result := reflect.New(expectedType).Elem() ++ for i := 0; i < expectedType.NumField(); i++ { ++ field := expectedType.Field(i) ++ isExported := field.IsExported() ++ if isExported { ++ fieldValue := expectedValue.Field(i) ++ if isNil(fieldValue) || isNil(fieldValue.Interface()) { ++ continue ++ } ++ newValue := copyExportedFields(fieldValue.Interface()) ++ result.Field(i).Set(reflect.ValueOf(newValue)) ++ } ++ } ++ return result.Interface() ++ ++ case reflect.Ptr: ++ result := reflect.New(expectedType.Elem()) ++ unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface()) ++ result.Elem().Set(reflect.ValueOf(unexportedRemoved)) ++ return result.Interface() ++ ++ case reflect.Array, reflect.Slice: ++ result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) ++ for i := 0; i < expectedValue.Len(); i++ { ++ index := expectedValue.Index(i) ++ if isNil(index) { ++ continue ++ } ++ unexportedRemoved := copyExportedFields(index.Interface()) ++ result.Index(i).Set(reflect.ValueOf(unexportedRemoved)) ++ } ++ return result.Interface() ++ ++ case reflect.Map: ++ result := reflect.MakeMap(expectedType) ++ for _, k := range expectedValue.MapKeys() { ++ index := expectedValue.MapIndex(k) ++ unexportedRemoved := copyExportedFields(index.Interface()) ++ result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved)) ++ } ++ return result.Interface() ++ ++ default: ++ return expected ++ } ++} ++ ++// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are ++// considered equal. This comparison of only exported fields is applied recursively to nested data ++// structures. ++// ++// This function does no assertion of any kind. ++func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { ++ expectedCleaned := copyExportedFields(expected) ++ actualCleaned := copyExportedFields(actual) ++ return ObjectsAreEqualValues(expectedCleaned, actualCleaned) ++} ++ + // ObjectsAreEqualValues gets whether two objects are equal, or if their + // values are equal. 
+ func ObjectsAreEqualValues(expected, actual interface{}) bool { +@@ -141,12 +211,11 @@ func CallerInfo() []string { + } + + parts := strings.Split(file, "/") +- file = parts[len(parts)-1] + if len(parts) > 1 { ++ filename := parts[len(parts)-1] + dir := parts[len(parts)-2] +- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { +- path, _ := filepath.Abs(file) +- callers = append(callers, fmt.Sprintf("%s:%d", path, line)) ++ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { ++ callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + +@@ -273,7 +342,7 @@ type labeledContent struct { + + // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: + // +-// \t{{label}}:{{align_spaces}}\t{{content}}\n ++// \t{{label}}:{{align_spaces}}\t{{content}}\n + // + // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. + // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +@@ -296,7 +365,7 @@ func labeledOutput(content ...labeledContent) string { + + // Implements asserts that an object is implemented by the specified interface. + // +-// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) ++// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) + func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -328,7 +397,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs + + // Equal asserts that two objects are equal. + // +-// assert.Equal(t, 123, 123) ++// assert.Equal(t, 123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -369,7 +438,7 @@ func validateEqualArgs(expected, actual interface{}) error { + + // Same asserts that two pointers reference the same object. + // +-// assert.Same(t, ptr1, ptr2) ++// assert.Same(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -389,7 +458,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b + + // NotSame asserts that two pointers do not reference the same object. + // +-// assert.NotSame(t, ptr1, ptr2) ++// assert.NotSame(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -457,7 +526,7 @@ func truncatingFormat(data interface{}) string { + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValues(t, uint32(123), int32(123)) ++// assert.EqualValues(t, uint32(123), int32(123)) + func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -475,9 +544,53 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa + + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true ++// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false ++func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ ++ aType := reflect.TypeOf(expected) ++ bType := reflect.TypeOf(actual) ++ ++ if aType != bType { ++ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) ++ } ++ ++ if aType.Kind() != reflect.Struct { ++ return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) ++ } ++ ++ if bType.Kind() != reflect.Struct { ++ return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) ++ } ++ ++ expected = copyExportedFields(expected) ++ actual = copyExportedFields(actual) ++ ++ if !ObjectsAreEqualValues(expected, actual) { ++ diff := diff(expected, actual) ++ expected, actual = formatUnequalValues(expected, actual) ++ return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+ ++ "expected: %s\n"+ ++ "actual : %s%s", expected, actual, diff), msgAndArgs...) ++ } ++ ++ return true ++} ++ + // Exactly asserts that two objects are equal in value and type. + // +-// assert.Exactly(t, int32(123), int64(123)) ++// assert.Exactly(t, int32(123), int64(123)) + func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -496,7 +609,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} + + // NotNil asserts that the specified object is not nil. + // +-// assert.NotNil(t, err) ++// assert.NotNil(t, err) + func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true +@@ -530,7 +643,7 @@ func isNil(object interface{}) bool { + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, +- reflect.Ptr, reflect.Slice}, ++ reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, + kind) + + if isNilableKind && value.IsNil() { +@@ -542,7 +655,7 @@ func isNil(object interface{}) bool { + + // Nil asserts that the specified object is nil. + // +-// assert.Nil(t, err) ++// assert.Nil(t, err) + func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true +@@ -585,7 +698,7 @@ func isEmpty(object interface{}) bool { + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Empty(t, obj) ++// assert.Empty(t, obj) + func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + pass := isEmpty(object) + if !pass { +@@ -602,9 +715,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmpty(t, obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmpty(t, obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + pass := !isEmpty(object) + if !pass { +@@ -633,7 +746,7 @@ func getLen(x interface{}) (ok bool, length int) { + // Len asserts that the specified object has specific length. 
+ // Len also fails if the object has a type that len() not accept. + // +-// assert.Len(t, mySlice, 3) ++// assert.Len(t, mySlice, 3) + func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -651,7 +764,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) + + // True asserts that the specified value is true. + // +-// assert.True(t, myBool) ++// assert.True(t, myBool) + func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if !value { + if h, ok := t.(tHelper); ok { +@@ -666,7 +779,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + // False asserts that the specified value is false. + // +-// assert.False(t, myBool) ++// assert.False(t, myBool) + func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if value { + if h, ok := t.(tHelper); ok { +@@ -681,7 +794,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + // NotEqual asserts that the specified values are NOT equal. + // +-// assert.NotEqual(t, obj1, obj2) ++// assert.NotEqual(t, obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -704,7 +817,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValues(t, obj1, obj2) ++// assert.NotEqualValues(t, obj1, obj2) + func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -763,9 +876,9 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Contains(t, "Hello World", "World") +-// assert.Contains(t, ["Hello", "World"], "World") +-// assert.Contains(t, {"Hello": "World"}, "Hello") ++// assert.Contains(t, "Hello World", "World") ++// assert.Contains(t, ["Hello", "World"], "World") ++// assert.Contains(t, {"Hello": "World"}, "Hello") + func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -786,9 +899,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContains(t, "Hello World", "Earth") +-// assert.NotContains(t, ["Hello", "World"], "Earth") +-// assert.NotContains(t, {"Hello": "World"}, "Earth") ++// assert.NotContains(t, "Hello World", "Earth") ++// assert.NotContains(t, ["Hello", "World"], "Earth") ++// assert.NotContains(t, {"Hello": "World"}, "Earth") + func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -796,10 +909,10 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) + + ok, found := containsElement(s, contains) + if !ok { +- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) 
+ } + if found { +- return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...) + } + + return true +@@ -809,7 +922,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -818,49 +931,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok + return true // we consider nil to be equal to the nil set + } + +- defer func() { +- if e := recover(); e != nil { +- ok = false +- } +- }() +- + listKind := reflect.TypeOf(list).Kind() +- subsetKind := reflect.TypeOf(subset).Kind() +- + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + ++ subsetKind := reflect.TypeOf(subset).Kind() + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + +- subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { +- listValue := reflect.ValueOf(list) +- subsetKeys := subsetValue.MapKeys() ++ subsetMap := reflect.ValueOf(subset) ++ actualMap := reflect.ValueOf(list) + +- for i := 0; i < len(subsetKeys); i++ { +- subsetKey := subsetKeys[i] +- subsetElement := subsetValue.MapIndex(subsetKey).Interface() +- listElement := listValue.MapIndex(subsetKey).Interface() ++ for _, k := range subsetMap.MapKeys() { ++ ev := subsetMap.MapIndex(k) ++ av := actualMap.MapIndex(k) + +- if !ObjectsAreEqual(subsetElement, listElement) { +- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) ++ if !av.IsValid() { ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) ++ } ++ if !ObjectsAreEqual(ev.Interface(), av.Interface()) { ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + } + + return true + } + +- for i := 0; i < subsetValue.Len(); i++ { +- element := subsetValue.Index(i).Interface() ++ subsetList := reflect.ValueOf(subset) ++ for i := 0; i < subsetList.Len(); i++ { ++ element := subsetList.Index(i).Interface() + ok, found := containsElement(list, element) + if !ok { +- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) + } + if !found { +- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) ++ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) + } + } + +@@ -870,7 +978,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). 
+ // +-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -879,34 +987,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) + } + +- defer func() { +- if e := recover(); e != nil { +- ok = false +- } +- }() +- + listKind := reflect.TypeOf(list).Kind() +- subsetKind := reflect.TypeOf(subset).Kind() +- + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + ++ subsetKind := reflect.TypeOf(subset).Kind() + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + +- subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { +- listValue := reflect.ValueOf(list) +- subsetKeys := subsetValue.MapKeys() ++ subsetMap := reflect.ValueOf(subset) ++ actualMap := reflect.ValueOf(list) + +- for i := 0; i < len(subsetKeys); i++ { +- subsetKey := subsetKeys[i] +- subsetElement := subsetValue.MapIndex(subsetKey).Interface() +- listElement := listValue.MapIndex(subsetKey).Interface() ++ for _, k := range subsetMap.MapKeys() { ++ ev := subsetMap.MapIndex(k) ++ av := actualMap.MapIndex(k) + +- if !ObjectsAreEqual(subsetElement, listElement) { ++ if !av.IsValid() { ++ return true ++ } ++ if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return true + } + } +@@ -914,8 +1016,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + +- for i := 0; i < subsetValue.Len(); i++ { +- element := subsetValue.Index(i).Interface() ++ subsetList := reflect.ValueOf(subset) ++ for i := 0; i < subsetList.Len(); i++ { ++ element := subsetList.Index(i).Interface() + ok, found := containsElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) +@@ -1060,7 +1163,7 @@ func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panics(t, func(){ GoCrazy() }) ++// assert.Panics(t, func(){ GoCrazy() }) + func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1076,7 +1179,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. 
+ // +-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1097,7 +1200,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1117,7 +1220,7 @@ func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs . + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanics(t, func(){ RemainCalm() }) ++// assert.NotPanics(t, func(){ RemainCalm() }) + func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1132,7 +1235,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) ++// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) + func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1148,7 +1251,7 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1207,7 +1310,7 @@ func toFloat(x interface{}) (float64, bool) { + + // InDelta asserts that the two numerals are within delta of each other. + // +-// assert.InDelta(t, math.Pi, 22/7.0, 0.01) ++// assert.InDelta(t, math.Pi, 22/7.0, 0.01) + func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1380,10 +1483,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoError(t, err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoError(t, err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + if h, ok := t.(tHelper); ok { +@@ -1397,10 +1500,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + + // Error asserts that a function returned an error (i.e. not `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if assert.Error(t, err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Error(t, err) { ++// assert.Equal(t, expectedError, err) ++// } + func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err == nil { + if h, ok := t.(tHelper); ok { +@@ -1415,8 +1518,8 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualError(t, err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// assert.EqualError(t, err, expectedErrorString) + func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1438,8 +1541,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContains(t, err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// assert.ErrorContains(t, err, expectedErrorSubString) + func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1472,8 +1575,8 @@ func matchRegexp(rx interface{}, str interface{}) bool { + + // Regexp asserts that a specified regexp matches a string. + // +-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +-// assert.Regexp(t, "start...$", "it's not starting") ++// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") ++// assert.Regexp(t, "start...$", "it's not starting") + func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1490,8 +1593,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +-// assert.NotRegexp(t, "^start", "it's not starting") ++// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") ++// assert.NotRegexp(t, "^start", "it's not starting") + func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1603,7 +1706,7 @@ func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + + // JSONEq asserts that two JSON strings are equivalent. + // +-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1726,7 +1829,7 @@ type tHelper interface { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. 
+ // +-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) ++// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) + func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1756,10 +1859,93 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t + } + } + ++// CollectT implements the TestingT interface and collects all errors. ++type CollectT struct { ++ errors []error ++} ++ ++// Errorf collects the error. ++func (c *CollectT) Errorf(format string, args ...interface{}) { ++ c.errors = append(c.errors, fmt.Errorf(format, args...)) ++} ++ ++// FailNow panics. ++func (c *CollectT) FailNow() { ++ panic("Assertion failed") ++} ++ ++// Reset clears the collected errors. ++func (c *CollectT) Reset() { ++ c.errors = nil ++} ++ ++// Copy copies the collected errors to the supplied t. ++func (c *CollectT) Copy(t TestingT) { ++ if tt, ok := t.(tHelper); ok { ++ tt.Helper() ++ } ++ for _, err := range c.errors { ++ t.Errorf("%v", err) ++ } ++} ++ ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithT(t, func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ ++ collect := new(CollectT) ++ ch := make(chan bool, 1) ++ ++ timer := time.NewTimer(waitFor) ++ defer timer.Stop() ++ ++ ticker := time.NewTicker(tick) ++ defer ticker.Stop() ++ ++ for tick := ticker.C; ; { ++ select { ++ case <-timer.C: ++ collect.Copy(t) ++ return Fail(t, "Condition never satisfied", msgAndArgs...) ++ case <-tick: ++ tick = nil ++ collect.Reset() ++ go func() { ++ condition(collect) ++ ch <- len(collect.errors) == 0 ++ }() ++ case v := <-ch: ++ if v { ++ return true ++ } ++ tick = ticker.C ++ } ++ } ++} ++ + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. 
+ // +-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) ++// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) + func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go +index c9dccc4d6cd..4953981d387 100644 +--- a/vendor/github.com/stretchr/testify/assert/doc.go ++++ b/vendor/github.com/stretchr/testify/assert/doc.go +@@ -1,39 +1,40 @@ + // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. + // +-// Example Usage ++// # Example Usage + // + // The following is a complete example using assert in a standard test function: +-// import ( +-// "testing" +-// "github.com/stretchr/testify/assert" +-// ) + // +-// func TestSomething(t *testing.T) { ++// import ( ++// "testing" ++// "github.com/stretchr/testify/assert" ++// ) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// func TestSomething(t *testing.T) { + // +-// assert.Equal(t, a, b, "The two words should be the same.") ++// var a string = "Hello" ++// var b string = "Hello" + // +-// } ++// assert.Equal(t, a, b, "The two words should be the same.") ++// ++// } + // + // if you assert many times, use the format below: + // +-// import ( +-// "testing" +-// "github.com/stretchr/testify/assert" +-// ) ++// import ( ++// "testing" ++// "github.com/stretchr/testify/assert" ++// ) + // +-// func TestSomething(t *testing.T) { +-// assert := assert.New(t) ++// func TestSomething(t *testing.T) { ++// assert := assert.New(t) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// var a string = "Hello" ++// var b string = "Hello" + // +-// assert.Equal(a, b, "The two words should be the same.") +-// } ++// assert.Equal(a, b, "The two words should be the same.") ++// } + // +-// Assertions ++// # Assertions + // + // Assertions allow you to easily write test code, and are global funcs in the `assert` package. + // All assertion functions take, as the first argument, the `*testing.T` object provided by the +diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go +index 4ed341dd289..d8038c28a75 100644 +--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go ++++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go +@@ -23,7 +23,7 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) ( + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) ++// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -45,7 +45,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value + + // HTTPRedirect asserts that a specified handler returns a redirect status code. 
+ // +-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -67,7 +67,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu + + // HTTPError asserts that a specified handler returns an error status code. + // +-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { +@@ -89,7 +89,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) ++// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { +@@ -124,7 +124,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +@@ -144,7 +144,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { +diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go +index 169de39221c..96843472455 100644 +--- a/vendor/github.com/stretchr/testify/require/doc.go ++++ b/vendor/github.com/stretchr/testify/require/doc.go +@@ -1,24 +1,25 @@ + // Package require implements the same assertions as the `assert` package but + // stops test execution when a test fails. 
+ // +-// Example Usage ++// # Example Usage + // + // The following is a complete example using require in a standard test function: +-// import ( +-// "testing" +-// "github.com/stretchr/testify/require" +-// ) + // +-// func TestSomething(t *testing.T) { ++// import ( ++// "testing" ++// "github.com/stretchr/testify/require" ++// ) + // +-// var a string = "Hello" +-// var b string = "Hello" ++// func TestSomething(t *testing.T) { + // +-// require.Equal(t, a, b, "The two words should be the same.") ++// var a string = "Hello" ++// var b string = "Hello" + // +-// } ++// require.Equal(t, a, b, "The two words should be the same.") + // +-// Assertions ++// } ++// ++// # Assertions + // + // The `require` package have same global functions as in the `assert` package, + // but instead of returning a boolean result they call `t.FailNow()`. +diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go +index 880853f5a2c..63f85214767 100644 +--- a/vendor/github.com/stretchr/testify/require/require.go ++++ b/vendor/github.com/stretchr/testify/require/require.go +@@ -37,9 +37,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Contains(t, "Hello World", "World") +-// assert.Contains(t, ["Hello", "World"], "World") +-// assert.Contains(t, {"Hello": "World"}, "Hello") ++// assert.Contains(t, "Hello World", "World") ++// assert.Contains(t, ["Hello", "World"], "World") ++// assert.Contains(t, {"Hello": "World"}, "Hello") + func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -53,9 +53,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") ++// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") ++// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") ++// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") + func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -123,7 +123,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// assert.Empty(t, obj) ++// assert.Empty(t, obj) + func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -137,7 +137,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// assert.Emptyf(t, obj, "error message %s", "formatted") ++// assert.Emptyf(t, obj, "error message %s", "formatted") + func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -150,7 +150,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + + // Equal asserts that two objects are equal. + // +-// assert.Equal(t, 123, 123) ++// assert.Equal(t, 123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -168,8 +168,8 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualError(t, err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// assert.EqualError(t, err, expectedErrorString) + func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -183,8 +183,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") + func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -195,10 +195,50 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args + t.FailNow() + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true ++// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false ++func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EqualExportedValues(t, expected, actual, msgAndArgs...) { ++ return ++ } ++ t.FailNow() ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EqualExportedValuesf(t, expected, actual, msg, args...) { ++ return ++ } ++ t.FailNow() ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. 
+ // +-// assert.EqualValues(t, uint32(123), int32(123)) ++// assert.EqualValues(t, uint32(123), int32(123)) + func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -212,7 +252,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") ++// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") + func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -225,7 +265,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri + + // Equalf asserts that two objects are equal. + // +-// assert.Equalf(t, 123, 123, "error message %s", "formatted") ++// assert.Equalf(t, 123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -242,10 +282,10 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.Error(t, err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Error(t, err) { ++// assert.Equal(t, expectedError, err) ++// } + func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -283,8 +323,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContains(t, err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// assert.ErrorContains(t, err, expectedErrorSubString) + func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -298,8 +338,8 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") + func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -336,10 +376,10 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface + + // Errorf asserts that a function returned an error (i.e. not `nil`). 
+ // +-// actualObj, err := SomeFunction() +-// if assert.Errorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if assert.Errorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -353,7 +393,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) ++// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) + func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -364,10 +404,66 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t + t.FailNow() + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithT(t, func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EventuallyWithT(t, condition, waitFor, tick, msgAndArgs...) { ++ return ++ } ++ t.FailNow() ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. 
++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { ++ if h, ok := t.(tHelper); ok { ++ h.Helper() ++ } ++ if assert.EventuallyWithTf(t, condition, waitFor, tick, msg, args...) { ++ return ++ } ++ t.FailNow() ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -380,7 +476,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick + + // Exactly asserts that two objects are equal in value and type. + // +-// assert.Exactly(t, int32(123), int64(123)) ++// assert.Exactly(t, int32(123), int64(123)) + func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -393,7 +489,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. + + // Exactlyf asserts that two objects are equal in value and type. + // +-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") ++// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") + func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -450,7 +546,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { + + // False asserts that the specified value is false. + // +-// assert.False(t, myBool) ++// assert.False(t, myBool) + func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -463,7 +559,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { + + // Falsef asserts that the specified value is false. 
+ // +-// assert.Falsef(t, myBool, "error message %s", "formatted") ++// assert.Falsef(t, myBool, "error message %s", "formatted") + func Falsef(t TestingT, value bool, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -500,9 +596,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + + // Greater asserts that the first element is greater than the second + // +-// assert.Greater(t, 2, 1) +-// assert.Greater(t, float64(2), float64(1)) +-// assert.Greater(t, "b", "a") ++// assert.Greater(t, 2, 1) ++// assert.Greater(t, float64(2), float64(1)) ++// assert.Greater(t, "b", "a") + func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -515,10 +611,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqual(t, 2, 1) +-// assert.GreaterOrEqual(t, 2, 2) +-// assert.GreaterOrEqual(t, "b", "a") +-// assert.GreaterOrEqual(t, "b", "b") ++// assert.GreaterOrEqual(t, 2, 1) ++// assert.GreaterOrEqual(t, 2, 2) ++// assert.GreaterOrEqual(t, "b", "a") ++// assert.GreaterOrEqual(t, "b", "b") + func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -531,10 +627,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") ++// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") + func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -547,9 +643,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg + + // Greaterf asserts that the first element is greater than the second + // +-// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +-// assert.Greaterf(t, "b", "a", "error message %s", "formatted") ++// assert.Greaterf(t, 2, 1, "error message %s", "formatted") ++// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") ++// assert.Greaterf(t, "b", "a", "error message %s", "formatted") + func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -563,7 +659,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. 
+ // +-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -579,7 +675,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -595,7 +691,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -611,7 +707,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -626,7 +722,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u + + // HTTPError asserts that a specified handler returns an error status code. + // +-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -641,7 +737,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). 
+ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -656,7 +752,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, + + // HTTPRedirect asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -671,7 +767,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -686,7 +782,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) ++// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { +@@ -701,7 +797,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { +@@ -716,7 +812,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) ++// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -731,7 +827,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string + + // HTTPSuccessf asserts that a specified handler returns a success status code. 
+ // +-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -746,7 +842,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin + + // Implements asserts that an object is implemented by the specified interface. + // +-// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) ++// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) + func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -759,7 +855,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -772,7 +868,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms + + // InDelta asserts that the two numerals are within delta of each other. + // +-// assert.InDelta(t, math.Pi, 22/7.0, 0.01) ++// assert.InDelta(t, math.Pi, 22/7.0, 0.01) + func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -829,7 +925,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f + + // InDeltaf asserts that the two numerals are within delta of each other. 
+ // +-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -886,9 +982,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl + + // IsDecreasing asserts that the collection is decreasing + // +-// assert.IsDecreasing(t, []int{2, 1, 0}) +-// assert.IsDecreasing(t, []float{2, 1}) +-// assert.IsDecreasing(t, []string{"b", "a"}) ++// assert.IsDecreasing(t, []int{2, 1, 0}) ++// assert.IsDecreasing(t, []float{2, 1}) ++// assert.IsDecreasing(t, []string{"b", "a"}) + func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -901,9 +997,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // IsDecreasingf asserts that the collection is decreasing + // +-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -916,9 +1012,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsIncreasing asserts that the collection is increasing + // +-// assert.IsIncreasing(t, []int{1, 2, 3}) +-// assert.IsIncreasing(t, []float{1, 2}) +-// assert.IsIncreasing(t, []string{"a", "b"}) ++// assert.IsIncreasing(t, []int{1, 2, 3}) ++// assert.IsIncreasing(t, []float{1, 2}) ++// assert.IsIncreasing(t, []string{"a", "b"}) + func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -931,9 +1027,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // IsIncreasingf asserts that the collection is increasing + // +-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -946,9 +1042,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// assert.IsNonDecreasing(t, []int{1, 1, 2}) +-// assert.IsNonDecreasing(t, []float{1, 2}) +-// assert.IsNonDecreasing(t, []string{"a", "b"}) ++// assert.IsNonDecreasing(t, []int{1, 1, 2}) ++// assert.IsNonDecreasing(t, []float{1, 2}) ++// assert.IsNonDecreasing(t, []string{"a", "b"}) + func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs 
...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -961,9 +1057,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") ++// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") + func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -976,9 +1072,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf + + // IsNonIncreasing asserts that the collection is not increasing + // +-// assert.IsNonIncreasing(t, []int{2, 1, 1}) +-// assert.IsNonIncreasing(t, []float{2, 1}) +-// assert.IsNonIncreasing(t, []string{"b", "a"}) ++// assert.IsNonIncreasing(t, []int{2, 1, 1}) ++// assert.IsNonIncreasing(t, []float{2, 1}) ++// assert.IsNonIncreasing(t, []string{"b", "a"}) + func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -991,9 +1087,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") ++// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") + func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1028,7 +1124,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin + + // JSONEq asserts that two JSON strings are equivalent. + // +-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1041,7 +1137,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1055,7 +1151,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int + // Len asserts that the specified object has specific length. 
+ // Len also fails if the object has a type that len() not accept. + // +-// assert.Len(t, mySlice, 3) ++// assert.Len(t, mySlice, 3) + func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1069,7 +1165,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") ++// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") + func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1082,9 +1178,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf + + // Less asserts that the first element is less than the second + // +-// assert.Less(t, 1, 2) +-// assert.Less(t, float64(1), float64(2)) +-// assert.Less(t, "a", "b") ++// assert.Less(t, 1, 2) ++// assert.Less(t, float64(1), float64(2)) ++// assert.Less(t, "a", "b") + func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1097,10 +1193,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqual(t, 1, 2) +-// assert.LessOrEqual(t, 2, 2) +-// assert.LessOrEqual(t, "a", "b") +-// assert.LessOrEqual(t, "b", "b") ++// assert.LessOrEqual(t, 1, 2) ++// assert.LessOrEqual(t, 2, 2) ++// assert.LessOrEqual(t, "a", "b") ++// assert.LessOrEqual(t, "b", "b") + func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1113,10 +1209,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") ++// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") ++// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") + func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1129,9 +1225,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
+ + // Lessf asserts that the first element is less than the second + // +-// assert.Lessf(t, 1, 2, "error message %s", "formatted") +-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +-// assert.Lessf(t, "a", "b", "error message %s", "formatted") ++// assert.Lessf(t, 1, 2, "error message %s", "formatted") ++// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") ++// assert.Lessf(t, "a", "b", "error message %s", "formatted") + func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1144,8 +1240,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter + + // Negative asserts that the specified element is negative + // +-// assert.Negative(t, -1) +-// assert.Negative(t, -1.23) ++// assert.Negative(t, -1) ++// assert.Negative(t, -1.23) + func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1158,8 +1254,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + + // Negativef asserts that the specified element is negative + // +-// assert.Negativef(t, -1, "error message %s", "formatted") +-// assert.Negativef(t, -1.23, "error message %s", "formatted") ++// assert.Negativef(t, -1, "error message %s", "formatted") ++// assert.Negativef(t, -1.23, "error message %s", "formatted") + func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1173,7 +1269,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) ++// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) + func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1187,7 +1283,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1200,7 +1296,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. + + // Nil asserts that the specified object is nil. + // +-// assert.Nil(t, err) ++// assert.Nil(t, err) + func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1213,7 +1309,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // Nilf asserts that the specified object is nil. 
+ // +-// assert.Nilf(t, err, "error message %s", "formatted") ++// assert.Nilf(t, err, "error message %s", "formatted") + func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1250,10 +1346,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoError(t, err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoError(t, err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1266,10 +1362,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if assert.NoErrorf(t, err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if assert.NoErrorf(t, err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1307,9 +1403,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContains(t, "Hello World", "Earth") +-// assert.NotContains(t, ["Hello", "World"], "Earth") +-// assert.NotContains(t, {"Hello": "World"}, "Earth") ++// assert.NotContains(t, "Hello World", "Earth") ++// assert.NotContains(t, ["Hello", "World"], "Earth") ++// assert.NotContains(t, {"Hello": "World"}, "Earth") + func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1323,9 +1419,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") ++// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") + func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1339,9 +1435,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// if assert.NotEmpty(t, obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmpty(t, obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1355,9 +1451,9 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1370,7 +1466,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) + + // NotEqual asserts that the specified values are NOT equal. + // +-// assert.NotEqual(t, obj1, obj2) ++// assert.NotEqual(t, obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1386,7 +1482,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValues(t, obj1, obj2) ++// assert.NotEqualValues(t, obj1, obj2) + func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1399,7 +1495,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") + func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1412,7 +1508,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s + + // NotEqualf asserts that the specified values are NOT equal. + // +-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") ++// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1452,7 +1548,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf + + // NotNil asserts that the specified object is not nil. + // +-// assert.NotNil(t, err) ++// assert.NotNil(t, err) + func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1465,7 +1561,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + + // NotNilf asserts that the specified object is not nil. 
+ // +-// assert.NotNilf(t, err, "error message %s", "formatted") ++// assert.NotNilf(t, err, "error message %s", "formatted") + func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1478,7 +1574,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanics(t, func(){ RemainCalm() }) ++// assert.NotPanics(t, func(){ RemainCalm() }) + func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1491,7 +1587,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") ++// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") + func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1504,8 +1600,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +-// assert.NotRegexp(t, "^start", "it's not starting") ++// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") ++// assert.NotRegexp(t, "^start", "it's not starting") + func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1518,8 +1614,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") + func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1532,7 +1628,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. + + // NotSame asserts that two pointers do not reference the same object. + // +-// assert.NotSame(t, ptr1, ptr2) ++// assert.NotSame(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1548,7 +1644,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. + + // NotSamef asserts that two pointers do not reference the same object. + // +-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1565,7 +1661,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, + // NotSubset asserts that the specified list(array, slice...) 
contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1579,7 +1675,7 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i + // NotSubsetf asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1614,7 +1710,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panics(t, func(){ GoCrazy() }) ++// assert.Panics(t, func(){ GoCrazy() }) + func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1629,7 +1725,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1644,7 +1740,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1658,7 +1754,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) ++// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) + func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1672,7 +1768,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. 
+ // +-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1685,7 +1781,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. + // +-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") ++// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") + func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1698,8 +1794,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} + + // Positive asserts that the specified element is positive + // +-// assert.Positive(t, 1) +-// assert.Positive(t, 1.23) ++// assert.Positive(t, 1) ++// assert.Positive(t, 1.23) + func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1712,8 +1808,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + + // Positivef asserts that the specified element is positive + // +-// assert.Positivef(t, 1, "error message %s", "formatted") +-// assert.Positivef(t, 1.23, "error message %s", "formatted") ++// assert.Positivef(t, 1, "error message %s", "formatted") ++// assert.Positivef(t, 1.23, "error message %s", "formatted") + func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1726,8 +1822,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + + // Regexp asserts that a specified regexp matches a string. + // +-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +-// assert.Regexp(t, "start...$", "it's not starting") ++// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") ++// assert.Regexp(t, "start...$", "it's not starting") + func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1740,8 +1836,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface + + // Regexpf asserts that a specified regexp matches a string. + // +-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") ++// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") + func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1754,7 +1850,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in + + // Same asserts that two pointers reference the same object. + // +-// assert.Same(t, ptr1, ptr2) ++// assert.Same(t, ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. 
+@@ -1770,7 +1866,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in + + // Samef asserts that two pointers reference the same object. + // +-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") ++// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1787,7 +1883,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1801,7 +1897,7 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1814,7 +1910,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args + + // True asserts that the specified value is true. + // +-// assert.True(t, myBool) ++// assert.True(t, myBool) + func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1827,7 +1923,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { + + // Truef asserts that the specified value is true. + // +-// assert.Truef(t, myBool, "error message %s", "formatted") ++// assert.Truef(t, myBool, "error message %s", "formatted") + func Truef(t TestingT, value bool, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1840,7 +1936,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) ++// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) + func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1853,7 +1949,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time + + // WithinDurationf asserts that the two times are within duration delta of each other. 
+ // +-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1866,7 +1962,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +@@ -1879,7 +1975,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m + + // WithinRangef asserts that a time is within a time range (inclusive). + // +-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() +diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go +index 960bf6f2cab..3b5b09330a4 100644 +--- a/vendor/github.com/stretchr/testify/require/require_forward.go ++++ b/vendor/github.com/stretchr/testify/require/require_forward.go +@@ -31,9 +31,9 @@ func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...inte + // Contains asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Contains("Hello World", "World") +-// a.Contains(["Hello", "World"], "World") +-// a.Contains({"Hello": "World"}, "Hello") ++// a.Contains("Hello World", "World") ++// a.Contains(["Hello", "World"], "World") ++// a.Contains({"Hello": "World"}, "Hello") + func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -44,9 +44,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. + // Containsf asserts that the specified string, list(array, slice...) or map contains the + // specified substring or element. + // +-// a.Containsf("Hello World", "World", "error message %s", "formatted") +-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") ++// a.Containsf("Hello World", "World", "error message %s", "formatted") ++// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") ++// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") + func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -99,7 +99,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st + // Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Empty(obj) ++// a.Empty(obj) + func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -110,7 +110,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// a.Emptyf(obj, "error message %s", "formatted") ++// a.Emptyf(obj, "error message %s", "formatted") + func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -120,7 +120,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) + + // Equal asserts that two objects are equal. + // +-// a.Equal(123, 123) ++// a.Equal(123, 123) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -135,8 +135,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs + // EqualError asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualError(err, expectedErrorString) ++// actualObj, err := SomeFunction() ++// a.EqualError(err, expectedErrorString) + func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -147,8 +147,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... + // EqualErrorf asserts that a function returned an error (i.e. not `nil`) + // and that it is equal to the provided error. + // +-// actualObj, err := SomeFunction() +-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") + func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -156,10 +156,44 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a + EqualErrorf(a.t, theError, errString, msg, args...) + } + ++// EqualExportedValues asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. ++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true ++// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false ++func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EqualExportedValues(a.t, expected, actual, msgAndArgs...) ++} ++ ++// EqualExportedValuesf asserts that the types of two objects are equal and their public ++// fields are also equal. This is useful for comparing structs that have private fields ++// that could potentially differ. 
++// ++// type S struct { ++// Exported int ++// notExported int ++// } ++// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true ++// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false ++func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EqualExportedValuesf(a.t, expected, actual, msg, args...) ++} ++ + // EqualValues asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValues(uint32(123), int32(123)) ++// a.EqualValues(uint32(123), int32(123)) + func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -170,7 +204,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn + // EqualValuesf asserts that two objects are equal or convertable to the same types + // and equal. + // +-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") ++// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") + func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -180,7 +214,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg + + // Equalf asserts that two objects are equal. + // +-// a.Equalf(123, 123, "error message %s", "formatted") ++// a.Equalf(123, 123, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). Function equality +@@ -194,10 +228,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string + + // Error asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Error(err) { +-// assert.Equal(t, expectedError, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Error(err) { ++// assert.Equal(t, expectedError, err) ++// } + func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -226,8 +260,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. + // ErrorContains asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. + // +-// actualObj, err := SomeFunction() +-// a.ErrorContains(err, expectedErrorSubString) ++// actualObj, err := SomeFunction() ++// a.ErrorContains(err, expectedErrorSubString) + func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -238,8 +272,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . + // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) + // and that the error contains the specified substring. 
+ // +-// actualObj, err := SomeFunction() +-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") ++// actualObj, err := SomeFunction() ++// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") + func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -267,10 +301,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter + + // Errorf asserts that a function returned an error (i.e. not `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.Errorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedErrorf, err) +-// } ++// actualObj, err := SomeFunction() ++// if a.Errorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedErrorf, err) ++// } + func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -281,7 +315,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + // Eventually asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) ++// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -289,10 +323,60 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) + } + ++// EventuallyWithT asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). ++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithT(func(c *assert.CollectT) { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) ++} ++ ++// EventuallyWithTf asserts that given condition will be met in waitFor time, ++// periodically checking target function each tick. In contrast to Eventually, ++// it supplies a CollectT to the condition function, so that the condition ++// function can use the CollectT to call other assertions. ++// The condition is considered "met" if no errors are raised in a tick. ++// The supplied CollectT collects all errors from one tick (if there are any). 
++// If the condition is not met before waitFor, the collected errors of ++// the last tick are copied to t. ++// ++// externalValue := false ++// go func() { ++// time.Sleep(8*time.Second) ++// externalValue = true ++// }() ++// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { ++// // add assertions as needed; any assertion failure will fail the current tick ++// assert.True(c, externalValue, "expected 'externalValue' to be true") ++// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") ++func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { ++ if h, ok := a.t.(tHelper); ok { ++ h.Helper() ++ } ++ EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) ++} ++ + // Eventuallyf asserts that given condition will be met in waitFor time, + // periodically checking target function each tick. + // +-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -302,7 +386,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t + + // Exactly asserts that two objects are equal in value and type. + // +-// a.Exactly(int32(123), int64(123)) ++// a.Exactly(int32(123), int64(123)) + func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -312,7 +396,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg + + // Exactlyf asserts that two objects are equal in value and type. + // +-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") ++// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") + func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -354,7 +438,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ + + // False asserts that the specified value is false. + // +-// a.False(myBool) ++// a.False(myBool) + func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -364,7 +448,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + + // Falsef asserts that the specified value is false. 
+ // +-// a.Falsef(myBool, "error message %s", "formatted") ++// a.Falsef(myBool, "error message %s", "formatted") + func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -392,9 +476,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + + // Greater asserts that the first element is greater than the second + // +-// a.Greater(2, 1) +-// a.Greater(float64(2), float64(1)) +-// a.Greater("b", "a") ++// a.Greater(2, 1) ++// a.Greater(float64(2), float64(1)) ++// a.Greater("b", "a") + func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -404,10 +488,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter + + // GreaterOrEqual asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqual(2, 1) +-// a.GreaterOrEqual(2, 2) +-// a.GreaterOrEqual("b", "a") +-// a.GreaterOrEqual("b", "b") ++// a.GreaterOrEqual(2, 1) ++// a.GreaterOrEqual(2, 2) ++// a.GreaterOrEqual("b", "a") ++// a.GreaterOrEqual("b", "b") + func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -417,10 +501,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . + + // GreaterOrEqualf asserts that the first element is greater than or equal to the second + // +-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") ++// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") ++// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -430,9 +514,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, + + // Greaterf asserts that the first element is greater than the second + // +-// a.Greaterf(2, 1, "error message %s", "formatted") +-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +-// a.Greaterf("b", "a", "error message %s", "formatted") ++// a.Greaterf(2, 1, "error message %s", "formatted") ++// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") ++// a.Greaterf("b", "a", "error message %s", "formatted") + func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -443,7 +527,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . + // HTTPBodyContains asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). 
+ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -456,7 +540,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u + // HTTPBodyContainsf asserts that a specified handler returns a + // body that contains a string. + // +-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -469,7 +553,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, + // HTTPBodyNotContains asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") ++// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { +@@ -482,7 +566,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string + // HTTPBodyNotContainsf asserts that a specified handler returns a + // body that does not contain a string. + // +-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") ++// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { +@@ -494,7 +578,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin + + // HTTPError asserts that a specified handler returns an error status code. + // +-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -506,7 +590,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri + + // HTTPErrorf asserts that a specified handler returns an error status code. + // +-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -518,7 +602,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str + + // HTTPRedirect asserts that a specified handler returns a redirect status code. 
+ // +-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -530,7 +614,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s + + // HTTPRedirectf asserts that a specified handler returns a redirect status code. + // +-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} ++// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -542,7 +626,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url + + // HTTPStatusCode asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) ++// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { +@@ -554,7 +638,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url + + // HTTPStatusCodef asserts that a specified handler returns a specified status code. + // +-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") ++// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { +@@ -566,7 +650,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur + + // HTTPSuccess asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) ++// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { +@@ -578,7 +662,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st + + // HTTPSuccessf asserts that a specified handler returns a success status code. + // +-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") ++// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") + // + // Returns whether the assertion was successful (true) or not (false). + func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { +@@ -590,7 +674,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s + + // Implements asserts that an object is implemented by the specified interface. 
+ // +-// a.Implements((*MyInterface)(nil), new(MyObject)) ++// a.Implements((*MyInterface)(nil), new(MyObject)) + func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -600,7 +684,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, + + // Implementsf asserts that an object is implemented by the specified interface. + // +-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") ++// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") + func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -610,7 +694,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} + + // InDelta asserts that the two numerals are within delta of each other. + // +-// a.InDelta(math.Pi, 22/7.0, 0.01) ++// a.InDelta(math.Pi, 22/7.0, 0.01) + func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -652,7 +736,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del + + // InDeltaf asserts that the two numerals are within delta of each other. + // +-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") ++// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") + func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -694,9 +778,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo + + // IsDecreasing asserts that the collection is decreasing + // +-// a.IsDecreasing([]int{2, 1, 0}) +-// a.IsDecreasing([]float{2, 1}) +-// a.IsDecreasing([]string{"b", "a"}) ++// a.IsDecreasing([]int{2, 1, 0}) ++// a.IsDecreasing([]float{2, 1}) ++// a.IsDecreasing([]string{"b", "a"}) + func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -706,9 +790,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) + + // IsDecreasingf asserts that the collection is decreasing + // +-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") ++// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -718,9 +802,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter + + // IsIncreasing asserts that the collection is increasing + // +-// a.IsIncreasing([]int{1, 2, 3}) +-// a.IsIncreasing([]float{1, 2}) +-// a.IsIncreasing([]string{"a", "b"}) ++// a.IsIncreasing([]int{1, 2, 3}) ++// a.IsIncreasing([]float{1, 2}) ++// a.IsIncreasing([]string{"a", "b"}) + func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -730,9 +814,9 @@ func (a *Assertions) 
IsIncreasing(object interface{}, msgAndArgs ...interface{}) + + // IsIncreasingf asserts that the collection is increasing + // +-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") ++// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -742,9 +826,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter + + // IsNonDecreasing asserts that the collection is not decreasing + // +-// a.IsNonDecreasing([]int{1, 1, 2}) +-// a.IsNonDecreasing([]float{1, 2}) +-// a.IsNonDecreasing([]string{"a", "b"}) ++// a.IsNonDecreasing([]int{1, 1, 2}) ++// a.IsNonDecreasing([]float{1, 2}) ++// a.IsNonDecreasing([]string{"a", "b"}) + func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -754,9 +838,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface + + // IsNonDecreasingf asserts that the collection is not decreasing + // +-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") ++// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") + func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -766,9 +850,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in + + // IsNonIncreasing asserts that the collection is not increasing + // +-// a.IsNonIncreasing([]int{2, 1, 1}) +-// a.IsNonIncreasing([]float{2, 1}) +-// a.IsNonIncreasing([]string{"b", "a"}) ++// a.IsNonIncreasing([]int{2, 1, 1}) ++// a.IsNonIncreasing([]float{2, 1}) ++// a.IsNonIncreasing([]string{"b", "a"}) + func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -778,9 +862,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface + + // IsNonIncreasingf asserts that the collection is not increasing + // +-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") ++// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") + func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -806,7 +890,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s + + // JSONEq asserts that two JSON strings are equivalent. 
+ // +-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) ++// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -816,7 +900,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf + + // JSONEqf asserts that two JSON strings are equivalent. + // +-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") ++// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") + func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -827,7 +911,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. + // Len asserts that the specified object has specific length. + // Len also fails if the object has a type that len() not accept. + // +-// a.Len(mySlice, 3) ++// a.Len(mySlice, 3) + func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -838,7 +922,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface + // Lenf asserts that the specified object has specific length. + // Lenf also fails if the object has a type that len() not accept. + // +-// a.Lenf(mySlice, 3, "error message %s", "formatted") ++// a.Lenf(mySlice, 3, "error message %s", "formatted") + func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -848,9 +932,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in + + // Less asserts that the first element is less than the second + // +-// a.Less(1, 2) +-// a.Less(float64(1), float64(2)) +-// a.Less("a", "b") ++// a.Less(1, 2) ++// a.Less(float64(1), float64(2)) ++// a.Less("a", "b") + func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -860,10 +944,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac + + // LessOrEqual asserts that the first element is less than or equal to the second + // +-// a.LessOrEqual(1, 2) +-// a.LessOrEqual(2, 2) +-// a.LessOrEqual("a", "b") +-// a.LessOrEqual("b", "b") ++// a.LessOrEqual(1, 2) ++// a.LessOrEqual(2, 2) ++// a.LessOrEqual("a", "b") ++// a.LessOrEqual("b", "b") + func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -873,10 +957,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i + + // LessOrEqualf asserts that the first element is less than or equal to the second + // +-// a.LessOrEqualf(1, 2, "error message %s", "formatted") +-// a.LessOrEqualf(2, 2, "error message %s", "formatted") +-// a.LessOrEqualf("a", "b", "error message %s", "formatted") +-// a.LessOrEqualf("b", "b", "error message %s", "formatted") ++// a.LessOrEqualf(1, 2, "error message %s", "formatted") ++// a.LessOrEqualf(2, 2, "error message %s", "formatted") ++// a.LessOrEqualf("a", "b", "error message %s", "formatted") ++// a.LessOrEqualf("b", "b", "error message %s", "formatted") + func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, 
msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -886,9 +970,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar + + // Lessf asserts that the first element is less than the second + // +-// a.Lessf(1, 2, "error message %s", "formatted") +-// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +-// a.Lessf("a", "b", "error message %s", "formatted") ++// a.Lessf(1, 2, "error message %s", "formatted") ++// a.Lessf(float64(1), float64(2), "error message %s", "formatted") ++// a.Lessf("a", "b", "error message %s", "formatted") + func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -898,8 +982,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i + + // Negative asserts that the specified element is negative + // +-// a.Negative(-1) +-// a.Negative(-1.23) ++// a.Negative(-1) ++// a.Negative(-1.23) + func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -909,8 +993,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + + // Negativef asserts that the specified element is negative + // +-// a.Negativef(-1, "error message %s", "formatted") +-// a.Negativef(-1.23, "error message %s", "formatted") ++// a.Negativef(-1, "error message %s", "formatted") ++// a.Negativef(-1.23, "error message %s", "formatted") + func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -921,7 +1005,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + // Never asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) ++// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) + func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -932,7 +1016,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti + // Neverf asserts that the given condition doesn't satisfy in waitFor time, + // periodically checking the target function each tick. + // +-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") ++// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") + func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -942,7 +1026,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t + + // Nil asserts that the specified object is nil. + // +-// a.Nil(err) ++// a.Nil(err) + func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -952,7 +1036,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + + // Nilf asserts that the specified object is nil. 
+ // +-// a.Nilf(err, "error message %s", "formatted") ++// a.Nilf(err, "error message %s", "formatted") + func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -980,10 +1064,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) + + // NoError asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoError(err) { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoError(err) { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -993,10 +1077,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + + // NoErrorf asserts that a function returned no error (i.e. `nil`). + // +-// actualObj, err := SomeFunction() +-// if a.NoErrorf(err, "error message %s", "formatted") { +-// assert.Equal(t, expectedObj, actualObj) +-// } ++// actualObj, err := SomeFunction() ++// if a.NoErrorf(err, "error message %s", "formatted") { ++// assert.Equal(t, expectedObj, actualObj) ++// } + func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1025,9 +1109,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContains("Hello World", "Earth") +-// a.NotContains(["Hello", "World"], "Earth") +-// a.NotContains({"Hello": "World"}, "Earth") ++// a.NotContains("Hello World", "Earth") ++// a.NotContains(["Hello", "World"], "Earth") ++// a.NotContains({"Hello": "World"}, "Earth") + func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1038,9 +1122,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs + // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the + // specified substring or element. + // +-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") ++// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") ++// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") ++// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") + func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1051,9 +1135,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. 
+ // +-// if a.NotEmpty(obj) { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmpty(obj) { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1064,9 +1148,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either + // a slice or a channel with len == 0. + // +-// if a.NotEmptyf(obj, "error message %s", "formatted") { +-// assert.Equal(t, "two", obj[1]) +-// } ++// if a.NotEmptyf(obj, "error message %s", "formatted") { ++// assert.Equal(t, "two", obj[1]) ++// } + func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1076,7 +1160,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface + + // NotEqual asserts that the specified values are NOT equal. + // +-// a.NotEqual(obj1, obj2) ++// a.NotEqual(obj1, obj2) + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1089,7 +1173,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr + + // NotEqualValues asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValues(obj1, obj2) ++// a.NotEqualValues(obj1, obj2) + func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1099,7 +1183,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms + + // NotEqualValuesf asserts that two objects are not equal even when converted to the same type + // +-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") + func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1109,7 +1193,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m + + // NotEqualf asserts that the specified values are NOT equal. + // +-// a.NotEqualf(obj1, obj2, "error message %s", "formatted") ++// a.NotEqualf(obj1, obj2, "error message %s", "formatted") + // + // Pointer variable equality is determined based on the equality of the + // referenced values (as opposed to the memory addresses). +@@ -1140,7 +1224,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in + + // NotNil asserts that the specified object is not nil. + // +-// a.NotNil(err) ++// a.NotNil(err) + func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1150,7 +1234,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + + // NotNilf asserts that the specified object is not nil. + // +-// a.NotNilf(err, "error message %s", "formatted") ++// a.NotNilf(err, "error message %s", "formatted") + func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1160,7 +1244,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} + + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. 
+ // +-// a.NotPanics(func(){ RemainCalm() }) ++// a.NotPanics(func(){ RemainCalm() }) + func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1170,7 +1254,7 @@ func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{} + + // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. + // +-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") ++// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") + func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1180,8 +1264,8 @@ func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...inte + + // NotRegexp asserts that a specified regexp does not match a string. + // +-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +-// a.NotRegexp("^start", "it's not starting") ++// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") ++// a.NotRegexp("^start", "it's not starting") + func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1191,8 +1275,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in + + // NotRegexpf asserts that a specified regexp does not match a string. + // +-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") ++// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") ++// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") + func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1202,7 +1286,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg + + // NotSame asserts that two pointers do not reference the same object. + // +-// a.NotSame(ptr1, ptr2) ++// a.NotSame(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1215,7 +1299,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg + + // NotSamef asserts that two pointers do not reference the same object. + // +-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") ++// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1229,7 +1313,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri + // NotSubset asserts that the specified list(array, slice...) contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") ++// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") + func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1240,7 +1324,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs + // NotSubsetf asserts that the specified list(array, slice...) 
contains not all + // elements given in the specified subset(array, slice...). + // +-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") ++// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1266,7 +1350,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + + // Panics asserts that the code inside the specified PanicTestFunc panics. + // +-// a.Panics(func(){ GoCrazy() }) ++// a.Panics(func(){ GoCrazy() }) + func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1278,7 +1362,7 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithError("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithError("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1290,7 +1374,7 @@ func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, m + // panics, and that the recovered panic value is an error that satisfies the + // EqualError comparison. + // +-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1301,7 +1385,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, + // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) ++// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) + func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1312,7 +1396,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFun + // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that + // the recovered panic value equals the expected panic value. + // +-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") ++// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1322,7 +1406,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFu + + // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+ // +-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") ++// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") + func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1332,8 +1416,8 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa + + // Positive asserts that the specified element is positive + // +-// a.Positive(1) +-// a.Positive(1.23) ++// a.Positive(1) ++// a.Positive(1.23) + func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1343,8 +1427,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + + // Positivef asserts that the specified element is positive + // +-// a.Positivef(1, "error message %s", "formatted") +-// a.Positivef(1.23, "error message %s", "formatted") ++// a.Positivef(1, "error message %s", "formatted") ++// a.Positivef(1.23, "error message %s", "formatted") + func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1354,8 +1438,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + + // Regexp asserts that a specified regexp matches a string. + // +-// a.Regexp(regexp.MustCompile("start"), "it's starting") +-// a.Regexp("start...$", "it's not starting") ++// a.Regexp(regexp.MustCompile("start"), "it's starting") ++// a.Regexp("start...$", "it's not starting") + func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1365,8 +1449,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter + + // Regexpf asserts that a specified regexp matches a string. + // +-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") ++// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") ++// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") + func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1376,7 +1460,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . + + // Same asserts that two pointers reference the same object. + // +-// a.Same(ptr1, ptr2) ++// a.Same(ptr1, ptr2) + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1389,7 +1473,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . + + // Samef asserts that two pointers reference the same object. + // +-// a.Samef(ptr1, ptr2, "error message %s", "formatted") ++// a.Samef(ptr1, ptr2, "error message %s", "formatted") + // + // Both arguments must be pointer variables. Pointer variable sameness is + // determined based on the equality of both type and value. +@@ -1403,7 +1487,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, + // Subset asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). 
+ // +-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") ++// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") + func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1414,7 +1498,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... + // Subsetf asserts that the specified list(array, slice...) contains all + // elements given in the specified subset(array, slice...). + // +-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") ++// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") + func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1424,7 +1508,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a + + // True asserts that the specified value is true. + // +-// a.True(myBool) ++// a.True(myBool) + func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1434,7 +1518,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + + // Truef asserts that the specified value is true. + // +-// a.Truef(myBool, "error message %s", "formatted") ++// a.Truef(myBool, "error message %s", "formatted") + func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1444,7 +1528,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + + // WithinDuration asserts that the two times are within duration delta of each other. + // +-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) ++// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) + func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1454,7 +1538,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta + + // WithinDurationf asserts that the two times are within duration delta of each other. + // +-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") ++// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") + func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1464,7 +1548,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta + + // WithinRange asserts that a time is within a time range (inclusive). + // +-// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) ++// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) + func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +@@ -1474,7 +1558,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim + + // WithinRangef asserts that a time is within a time range (inclusive). 
+ // +-// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") ++// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") + func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +new file mode 100644 +index 00000000000..67f8d733999 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +@@ -0,0 +1,229 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "go.opentelemetry.io/otel" ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/propagation" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++const ( ++ // ScopeName is the instrumentation scope name. ++ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. ++ GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++// Filter is a predicate used to determine whether a given request in ++// interceptor info should be traced. A Filter must return true if ++// the request should be traced. ++type Filter func(*InterceptorInfo) bool ++ ++// config is a group of options for this instrumentation. ++type config struct { ++ Filter Filter ++ Propagators propagation.TextMapPropagator ++ TracerProvider trace.TracerProvider ++ MeterProvider metric.MeterProvider ++ SpanStartOptions []trace.SpanStartOption ++ ++ ReceivedEvent bool ++ SentEvent bool ++ ++ tracer trace.Tracer ++ meter metric.Meter ++ ++ rpcDuration metric.Float64Histogram ++ rpcRequestSize metric.Int64Histogram ++ rpcResponseSize metric.Int64Histogram ++ rpcRequestsPerRPC metric.Int64Histogram ++ rpcResponsesPerRPC metric.Int64Histogram ++} ++ ++// Option applies an option value for a config. ++type Option interface { ++ apply(*config) ++} ++ ++// newConfig returns a config configured with all the passed Options. 
++func newConfig(opts []Option, role string) *config { ++ c := &config{ ++ Propagators: otel.GetTextMapPropagator(), ++ TracerProvider: otel.GetTracerProvider(), ++ MeterProvider: otel.GetMeterProvider(), ++ } ++ for _, o := range opts { ++ o.apply(c) ++ } ++ ++ c.tracer = c.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(SemVersion()), ++ ) ++ ++ c.meter = c.MeterProvider.Meter( ++ ScopeName, ++ metric.WithInstrumentationVersion(Version()), ++ metric.WithSchemaURL(semconv.SchemaURL), ++ ) ++ ++ var err error ++ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration", ++ metric.WithDescription("Measures the duration of inbound RPC."), ++ metric.WithUnit("ms")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", ++ metric.WithDescription("Measures size of RPC request messages (uncompressed)."), ++ metric.WithUnit("By")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", ++ metric.WithDescription("Measures size of RPC response messages (uncompressed)."), ++ metric.WithUnit("By")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", ++ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), ++ metric.WithUnit("{count}")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", ++ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), ++ metric.WithUnit("{count}")) ++ if err != nil { ++ otel.Handle(err) ++ } ++ ++ return c ++} ++ ++type propagatorsOption struct{ p propagation.TextMapPropagator } ++ ++func (o propagatorsOption) apply(c *config) { ++ if o.p != nil { ++ c.Propagators = o.p ++ } ++} ++ ++// WithPropagators returns an Option to use the Propagators when extracting ++// and injecting trace context from requests. ++func WithPropagators(p propagation.TextMapPropagator) Option { ++ return propagatorsOption{p: p} ++} ++ ++type tracerProviderOption struct{ tp trace.TracerProvider } ++ ++func (o tracerProviderOption) apply(c *config) { ++ if o.tp != nil { ++ c.TracerProvider = o.tp ++ } ++} ++ ++// WithInterceptorFilter returns an Option to use the request filter. ++// ++// Deprecated: Use stats handlers instead. ++func WithInterceptorFilter(f Filter) Option { ++ return interceptorFilterOption{f: f} ++} ++ ++type interceptorFilterOption struct { ++ f Filter ++} ++ ++func (o interceptorFilterOption) apply(c *config) { ++ if o.f != nil { ++ c.Filter = o.f ++ } ++} ++ ++// WithTracerProvider returns an Option to use the TracerProvider when ++// creating a Tracer. ++func WithTracerProvider(tp trace.TracerProvider) Option { ++ return tracerProviderOption{tp: tp} ++} ++ ++type meterProviderOption struct{ mp metric.MeterProvider } ++ ++func (o meterProviderOption) apply(c *config) { ++ if o.mp != nil { ++ c.MeterProvider = o.mp ++ } ++} ++ ++// WithMeterProvider returns an Option to use the MeterProvider when ++// creating a Meter. If this option is not provide the global MeterProvider will be used. ++func WithMeterProvider(mp metric.MeterProvider) Option { ++ return meterProviderOption{mp: mp} ++} ++ ++// Event type that can be recorded, see WithMessageEvents. 
++type Event int ++ ++// Different types of events that can be recorded, see WithMessageEvents. ++const ( ++ ReceivedEvents Event = iota ++ SentEvents ++) ++ ++type messageEventsProviderOption struct { ++ events []Event ++} ++ ++func (m messageEventsProviderOption) apply(c *config) { ++ for _, e := range m.events { ++ switch e { ++ case ReceivedEvents: ++ c.ReceivedEvent = true ++ case SentEvents: ++ c.SentEvent = true ++ } ++ } ++} ++ ++// WithMessageEvents configures the Handler to record the specified events ++// (span.AddEvent) on spans. By default only summary attributes are added at the ++// end of the request. ++// ++// Valid events are: ++// - ReceivedEvents: Record the number of bytes read after every gRPC read operation. ++// - SentEvents: Record the number of bytes written after every gRPC write operation. ++func WithMessageEvents(events ...Event) Option { ++ return messageEventsProviderOption{events: events} ++} ++ ++type spanStartOption struct{ opts []trace.SpanStartOption } ++ ++func (o spanStartOption) apply(c *config) { ++ c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) ++} ++ ++// WithSpanOptions configures an additional set of ++// trace.SpanOptions, which are applied to each new span. ++func WithSpanOptions(opts ...trace.SpanStartOption) Option { ++ return spanStartOption{opts} ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +new file mode 100644 +index 00000000000..958dcd87a4c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +@@ -0,0 +1,22 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++/* ++Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. ++ ++Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client. ++ ++Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server. ++*/ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go +deleted file mode 100644 +index f512cf6e315..00000000000 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go ++++ /dev/null +@@ -1,163 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. 
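// Hedged illustrative sketch, not part of the vendored patch: how a caller
// would wire the stats handlers that the doc.go hunk above describes, using
// only APIs added by this version of otelgrpc. The endpoint address and the
// insecure transport credentials are placeholder assumptions.
package example

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// newTracedClientConn dials a gRPC endpoint with the client stats handler,
// which records spans and the rpc.client.* instruments built in newConfig.
func newTracedClientConn() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"localhost:50051", // placeholder endpoint
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			// Optional: emit per-message span events, mirroring WithMessageEvents.
			otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
		)),
	)
}

// newTracedServer builds a gRPC server instrumented via grpc.StatsHandler.
func newTracedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
}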
+-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +- +-import ( +- "context" +- +- "google.golang.org/grpc/metadata" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/baggage" +- "go.opentelemetry.io/otel/propagation" +- "go.opentelemetry.io/otel/trace" +-) +- +-const ( +- // instrumentationName is the name of this instrumentation package. +- instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +- // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. +- GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +-) +- +-// Filter is a predicate used to determine whether a given request in +-// interceptor info should be traced. A Filter must return true if +-// the request should be traced. +-type Filter func(*InterceptorInfo) bool +- +-// config is a group of options for this instrumentation. +-type config struct { +- Filter Filter +- Propagators propagation.TextMapPropagator +- TracerProvider trace.TracerProvider +-} +- +-// Option applies an option value for a config. +-type Option interface { +- apply(*config) +-} +- +-// newConfig returns a config configured with all the passed Options. +-func newConfig(opts []Option) *config { +- c := &config{ +- Propagators: otel.GetTextMapPropagator(), +- TracerProvider: otel.GetTracerProvider(), +- } +- for _, o := range opts { +- o.apply(c) +- } +- return c +-} +- +-type propagatorsOption struct{ p propagation.TextMapPropagator } +- +-func (o propagatorsOption) apply(c *config) { +- if o.p != nil { +- c.Propagators = o.p +- } +-} +- +-// WithPropagators returns an Option to use the Propagators when extracting +-// and injecting trace context from requests. +-func WithPropagators(p propagation.TextMapPropagator) Option { +- return propagatorsOption{p: p} +-} +- +-type tracerProviderOption struct{ tp trace.TracerProvider } +- +-func (o tracerProviderOption) apply(c *config) { +- if o.tp != nil { +- c.TracerProvider = o.tp +- } +-} +- +-// WithInterceptorFilter returns an Option to use the request filter. +-func WithInterceptorFilter(f Filter) Option { +- return interceptorFilterOption{f: f} +-} +- +-type interceptorFilterOption struct { +- f Filter +-} +- +-func (o interceptorFilterOption) apply(c *config) { +- if o.f != nil { +- c.Filter = o.f +- } +-} +- +-// WithTracerProvider returns an Option to use the TracerProvider when +-// creating a Tracer. +-func WithTracerProvider(tp trace.TracerProvider) Option { +- return tracerProviderOption{tp: tp} +-} +- +-type metadataSupplier struct { +- metadata *metadata.MD +-} +- +-// assert that metadataSupplier implements the TextMapCarrier interface. 
+-var _ propagation.TextMapCarrier = &metadataSupplier{} +- +-func (s *metadataSupplier) Get(key string) string { +- values := s.metadata.Get(key) +- if len(values) == 0 { +- return "" +- } +- return values[0] +-} +- +-func (s *metadataSupplier) Set(key string, value string) { +- s.metadata.Set(key, value) +-} +- +-func (s *metadataSupplier) Keys() []string { +- out := make([]string, 0, len(*s.metadata)) +- for key := range *s.metadata { +- out = append(out, key) +- } +- return out +-} +- +-// Inject injects correlation context and span context into the gRPC +-// metadata object. This function is meant to be used on outgoing +-// requests. +-func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { +- c := newConfig(opts) +- inject(ctx, md, c.Propagators) +-} +- +-func inject(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) { +- propagators.Inject(ctx, &metadataSupplier{ +- metadata: md, +- }) +-} +- +-// Extract returns the correlation context and span context that +-// another service encoded in the gRPC metadata object with Inject. +-// This function is meant to be used on incoming requests. +-func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { +- c := newConfig(opts) +- return extract(ctx, md, c.Propagators) +-} +- +-func extract(ctx context.Context, md *metadata.MD, propagators propagation.TextMapPropagator) (baggage.Baggage, trace.SpanContext) { +- ctx = propagators.Extract(ctx, &metadataSupplier{ +- metadata: md, +- }) +- +- return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +-} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +index 26343dfc16e..fa015e9ac88 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +@@ -20,41 +20,37 @@ import ( + "context" + "io" + "net" +- +- "github.com/golang/protobuf/proto" // nolint:staticcheck ++ "strconv" ++ "time" + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" ++ "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/codes" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ "go.opentelemetry.io/otel/metric" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" + ) + + type messageType attribute.KeyValue + + // Event adds an event of the messageType to the span associated with the +-// passed context with id and size (if message is a proto message). +-func (m messageType) Event(ctx context.Context, id int, message interface{}) { ++// passed context with a message id. 
++func (m messageType) Event(ctx context.Context, id int, _ interface{}) { + span := trace.SpanFromContext(ctx) +- if p, ok := message.(proto.Message); ok { +- span.AddEvent("message", trace.WithAttributes( +- attribute.KeyValue(m), +- RPCMessageIDKey.Int(id), +- RPCMessageUncompressedSizeKey.Int(proto.Size(p)), +- )) +- } else { +- span.AddEvent("message", trace.WithAttributes( +- attribute.KeyValue(m), +- RPCMessageIDKey.Int(id), +- )) ++ if !span.IsRecording() { ++ return + } ++ span.AddEvent("message", trace.WithAttributes( ++ attribute.KeyValue(m), ++ RPCMessageIDKey.Int(id), ++ )) + } + + var ( +@@ -64,8 +60,15 @@ var ( + + // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable + // for use in a grpc.Dial call. ++// ++// Deprecated: Use [NewClientHandler] instead. + func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "client") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + method string, +@@ -82,32 +85,33 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + return invoker(ctx, method, req, reply, cc, callOpts...) + } + +- requestMetadata, _ := metadata.FromOutgoingContext(ctx) +- metadataCopy := requestMetadata.Copy() ++ name, attr, _ := telemetryAttributes(method, cc.Target()) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(method, cc.Target()) +- var span trace.Span +- ctx, span = tracer.Start( ++ ctx, span := tracer.Start( + ctx, + name, +- trace.WithSpanKind(trace.SpanKindClient), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- inject(ctx, &metadataCopy, cfg.Propagators) +- ctx = metadata.NewOutgoingContext(ctx, metadataCopy) ++ ctx = inject(ctx, cfg.Propagators) + +- messageSent.Event(ctx, 1, req) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, req) ++ } + + err := invoker(ctx, method, req, reply, cc, callOpts...) 
+ +- messageReceived.Event(ctx, 1, reply) ++ if cfg.ReceivedEvent { ++ messageReceived.Event(ctx, 1, reply) ++ } + + if err != nil { + s, _ := status.FromError(err) +@@ -143,6 +147,9 @@ type clientStream struct { + eventsDone chan struct{} + finished chan error + ++ receivedEvent bool ++ sentEvent bool ++ + receivedMessageID int + sentMessageID int + } +@@ -160,7 +167,10 @@ func (w *clientStream) RecvMsg(m interface{}) error { + w.sendStreamEvent(errorEvent, err) + } else { + w.receivedMessageID++ +- messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ ++ if w.receivedEvent { ++ messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ } + } + + return err +@@ -170,7 +180,10 @@ func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ +- messageSent.Event(w.Context(), w.sentMessageID, m) ++ ++ if w.sentEvent { ++ messageSent.Event(w.Context(), w.sentMessageID, m) ++ } + + if err != nil { + w.sendStreamEvent(errorEvent, err) +@@ -181,7 +194,6 @@ func (w *clientStream) SendMsg(m interface{}) error { + + func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() +- + if err != nil { + w.sendStreamEvent(errorEvent, err) + } +@@ -191,7 +203,6 @@ func (w *clientStream) Header() (metadata.MD, error) { + + func (w *clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() +- + if err != nil { + w.sendStreamEvent(errorEvent, err) + } +@@ -199,7 +210,7 @@ func (w *clientStream) CloseSend() error { + return err + } + +-func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream { ++func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, cfg *config) *clientStream { + events := make(chan streamEvent) + eventsDone := make(chan struct{}) + finished := make(chan error) +@@ -226,11 +237,13 @@ func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.Strea + }() + + return &clientStream{ +- ClientStream: s, +- desc: desc, +- events: events, +- eventsDone: eventsDone, +- finished: finished, ++ ClientStream: s, ++ desc: desc, ++ events: events, ++ eventsDone: eventsDone, ++ finished: finished, ++ receivedEvent: cfg.ReceivedEvent, ++ sentEvent: cfg.SentEvent, + } + } + +@@ -243,8 +256,15 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { + + // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable + // for use in a grpc.Dial call. ++// ++// Deprecated: Use [NewClientHandler] instead. + func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "client") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + desc *grpc.StreamDesc, +@@ -261,25 +281,22 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + return streamer(ctx, desc, cc, method, callOpts...) 
+ } + +- requestMetadata, _ := metadata.FromOutgoingContext(ctx) +- metadataCopy := requestMetadata.Copy() ++ name, attr, _ := telemetryAttributes(method, cc.Target()) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(method, cc.Target()) +- var span trace.Span +- ctx, span = tracer.Start( ++ ctx, span := tracer.Start( + ctx, + name, +- trace.WithSpanKind(trace.SpanKindClient), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + +- inject(ctx, &metadataCopy, cfg.Propagators) +- ctx = metadata.NewOutgoingContext(ctx, metadataCopy) ++ ctx = inject(ctx, cfg.Propagators) + + s, err := streamer(ctx, desc, cc, method, callOpts...) + if err != nil { +@@ -289,7 +306,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + span.End() + return s, err + } +- stream := wrapClientStream(ctx, s, desc) ++ stream := wrapClientStream(ctx, s, desc, cfg) + + go func() { + err := <-stream.finished +@@ -311,8 +328,15 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + + // UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable + // for use in a grpc.NewServer call. ++// ++// Deprecated: Use [NewServerHandler] instead. + func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "server") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + ctx context.Context, + req interface{}, +@@ -327,38 +351,49 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + return handler(ctx, req) + } + +- requestMetadata, _ := metadata.FromIncomingContext(ctx) +- metadataCopy := requestMetadata.Copy() +- +- bags, spanCtx := Extract(ctx, &metadataCopy, opts...) 
+- ctx = baggage.ContextWithBaggage(ctx, bags) ++ ctx = extract(ctx, cfg.Propagators) ++ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( +- trace.ContextWithRemoteSpanContext(ctx, spanCtx), ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, +- trace.WithSpanKind(trace.SpanKindServer), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- messageReceived.Event(ctx, 1, req) ++ if cfg.ReceivedEvent { ++ messageReceived.Event(ctx, 1, req) ++ } ++ ++ before := time.Now() + + resp, err := handler(ctx, req) ++ ++ s, _ := status.FromError(err) + if err != nil { +- s, _ := status.FromError(err) +- span.SetStatus(codes.Error, s.Message()) +- span.SetAttributes(statusCodeAttr(s.Code())) +- messageSent.Event(ctx, 1, s.Proto()) ++ statusCode, msg := serverStatus(s) ++ span.SetStatus(statusCode, msg) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, s.Proto()) ++ } + } else { +- span.SetAttributes(statusCodeAttr(grpc_codes.OK)) +- messageSent.Event(ctx, 1, resp) ++ if cfg.SentEvent { ++ messageSent.Event(ctx, 1, resp) ++ } + } ++ grpcStatusCodeAttr := statusCodeAttr(s.Code()) ++ span.SetAttributes(grpcStatusCodeAttr) ++ ++ elapsedTime := time.Since(before).Milliseconds() ++ metricAttrs = append(metricAttrs, grpcStatusCodeAttr) ++ cfg.rpcDuration.Record(ctx, float64(elapsedTime), metric.WithAttributes(metricAttrs...)) + + return resp, err + } +@@ -372,6 +407,9 @@ type serverStream struct { + + receivedMessageID int + sentMessageID int ++ ++ receivedEvent bool ++ sentEvent bool + } + + func (w *serverStream) Context() context.Context { +@@ -383,7 +421,9 @@ func (w *serverStream) RecvMsg(m interface{}) error { + + if err == nil { + w.receivedMessageID++ +- messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ if w.receivedEvent { ++ messageReceived.Event(w.Context(), w.receivedMessageID, m) ++ } + } + + return err +@@ -393,22 +433,33 @@ func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ +- messageSent.Event(w.Context(), w.sentMessageID, m) ++ if w.sentEvent { ++ messageSent.Event(w.Context(), w.sentMessageID, m) ++ } + + return err + } + +-func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { ++func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { + return &serverStream{ +- ServerStream: ss, +- ctx: ctx, ++ ServerStream: ss, ++ ctx: ctx, ++ receivedEvent: cfg.ReceivedEvent, ++ sentEvent: cfg.SentEvent, + } + } + + // StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable + // for use in a grpc.NewServer call. ++// ++// Deprecated: Use [NewServerHandler] instead. 
+ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { +- cfg := newConfig(opts) ++ cfg := newConfig(opts, "server") ++ tracer := cfg.TracerProvider.Tracer( ++ ScopeName, ++ trace.WithInstrumentationVersion(Version()), ++ ) ++ + return func( + srv interface{}, + ss grpc.ServerStream, +@@ -421,34 +472,31 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { +- return handler(srv, wrapServerStream(ctx, ss)) ++ return handler(srv, wrapServerStream(ctx, ss, cfg)) + } + +- requestMetadata, _ := metadata.FromIncomingContext(ctx) +- metadataCopy := requestMetadata.Copy() +- +- bags, spanCtx := Extract(ctx, &metadataCopy, opts...) +- ctx = baggage.ContextWithBaggage(ctx, bags) ++ ctx = extract(ctx, cfg.Propagators) ++ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + +- tracer := cfg.TracerProvider.Tracer( +- instrumentationName, +- trace.WithInstrumentationVersion(SemVersion()), ++ startOpts := append([]trace.SpanStartOption{ ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attr...), ++ }, ++ cfg.SpanStartOptions..., + ) + +- name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( +- trace.ContextWithRemoteSpanContext(ctx, spanCtx), ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, +- trace.WithSpanKind(trace.SpanKindServer), +- trace.WithAttributes(attr...), ++ startOpts..., + ) + defer span.End() + +- err := handler(srv, wrapServerStream(ctx, ss)) +- ++ err := handler(srv, wrapServerStream(ctx, ss, cfg)) + if err != nil { + s, _ := status.FromError(err) +- span.SetStatus(codes.Error, s.Message()) ++ statusCode, msg := serverStatus(s) ++ span.SetStatus(statusCode, msg) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) +@@ -458,31 +506,49 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + } + } + +-// spanInfo returns a span name and all appropriate attributes from the gRPC +-// method and peer address. +-func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) { +- attrs := []attribute.KeyValue{RPCSystemGRPC} +- name, mAttrs := internal.ParseFullMethod(fullMethod) +- attrs = append(attrs, mAttrs...) +- attrs = append(attrs, peerAttr(peerAddress)...) +- return name, attrs ++// telemetryAttributes returns a span name and span and metric attributes from ++// the gRPC method and peer address. ++func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) { ++ name, methodAttrs := internal.ParseFullMethod(fullMethod) ++ peerAttrs := peerAttr(peerAddress) ++ ++ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs)) ++ attrs = append(attrs, RPCSystemGRPC) ++ attrs = append(attrs, methodAttrs...) ++ metricAttrs := attrs[:1+len(methodAttrs)] ++ attrs = append(attrs, peerAttrs...) ++ return name, attrs, metricAttrs + } + + // peerAttr returns attributes about the peer address. 
+ func peerAttr(addr string) []attribute.KeyValue { +- host, port, err := net.SplitHostPort(addr) ++ host, p, err := net.SplitHostPort(addr) + if err != nil { +- return []attribute.KeyValue(nil) ++ return nil + } + + if host == "" { + host = "127.0.0.1" + } ++ port, err := strconv.Atoi(p) ++ if err != nil { ++ return nil ++ } + +- return []attribute.KeyValue{ +- semconv.NetPeerIPKey.String(host), +- semconv.NetPeerPortKey.String(port), ++ var attr []attribute.KeyValue ++ if ip := net.ParseIP(host); ip != nil { ++ attr = []attribute.KeyValue{ ++ semconv.NetSockPeerAddr(host), ++ semconv.NetSockPeerPort(port), ++ } ++ } else { ++ attr = []attribute.KeyValue{ ++ semconv.NetPeerName(host), ++ semconv.NetPeerPort(port), ++ } + } ++ ++ return attr + } + + // peerFromCtx returns a peer address from a context, if one exists. +@@ -498,3 +564,26 @@ func peerFromCtx(ctx context.Context) string { + func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) + } ++ ++// serverStatus returns a span status code and message for a given gRPC ++// status code. It maps specific gRPC status codes to a corresponding span ++// status code and message. This function is intended for use on the server ++// side of a gRPC connection. ++// ++// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented, ++// Internal, Unavailable, or DataLoss, it returns a span status code of Error ++// and the message from the gRPC status. Otherwise, it returns a span status ++// code of Unset and an empty message. ++func serverStatus(grpcStatus *status.Status) (codes.Code, string) { ++ switch grpcStatus.Code() { ++ case grpc_codes.Unknown, ++ grpc_codes.DeadlineExceeded, ++ grpc_codes.Unimplemented, ++ grpc_codes.Internal, ++ grpc_codes.Unavailable, ++ grpc_codes.DataLoss: ++ return codes.Error, grpcStatus.Message() ++ default: ++ return codes.Unset, "" ++ } ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +index bc214d363a2..cf32a9e978c 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +@@ -18,26 +18,34 @@ import ( + "strings" + + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + ) + + // ParseFullMethod returns a span name following the OpenTelemetry semantic + // conventions as well as all applicable span attribute.KeyValue attributes based + // on a gRPC's FullMethod. ++// ++// Parsing is consistent with grpc-go implementation: ++// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39 + func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { +- name := strings.TrimLeft(fullMethod, "/") +- parts := strings.SplitN(name, "/", 2) +- if len(parts) != 2 { ++ if !strings.HasPrefix(fullMethod, "/") { ++ // Invalid format, does not follow `/package.service/method`. ++ return fullMethod, nil ++ } ++ name := fullMethod[1:] ++ pos := strings.LastIndex(name, "/") ++ if pos < 0 { + // Invalid format, does not follow `/package.service/method`. 
+- return name, []attribute.KeyValue(nil) ++ return name, nil + } ++ service, method := name[:pos], name[pos+1:] + + var attrs []attribute.KeyValue +- if service := parts[0]; service != "" { +- attrs = append(attrs, semconv.RPCServiceKey.String(service)) ++ if service != "" { ++ attrs = append(attrs, semconv.RPCService(service)) + } +- if method := parts[1]; method != "" { +- attrs = append(attrs, semconv.RPCMethodKey.String(method)) ++ if method != "" { ++ attrs = append(attrs, semconv.RPCMethod(method)) + } + return name, attrs + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +new file mode 100644 +index 00000000000..f585fb6ae0c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +@@ -0,0 +1,98 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "context" ++ ++ "google.golang.org/grpc/metadata" ++ ++ "go.opentelemetry.io/otel/baggage" ++ "go.opentelemetry.io/otel/propagation" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++type metadataSupplier struct { ++ metadata *metadata.MD ++} ++ ++// assert that metadataSupplier implements the TextMapCarrier interface. ++var _ propagation.TextMapCarrier = &metadataSupplier{} ++ ++func (s *metadataSupplier) Get(key string) string { ++ values := s.metadata.Get(key) ++ if len(values) == 0 { ++ return "" ++ } ++ return values[0] ++} ++ ++func (s *metadataSupplier) Set(key string, value string) { ++ s.metadata.Set(key, value) ++} ++ ++func (s *metadataSupplier) Keys() []string { ++ out := make([]string, 0, len(*s.metadata)) ++ for key := range *s.metadata { ++ out = append(out, key) ++ } ++ return out ++} ++ ++// Inject injects correlation context and span context into the gRPC ++// metadata object. This function is meant to be used on outgoing ++// requests. ++// Deprecated: Unnecessary public func. ++func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { ++ c := newConfig(opts, "") ++ c.Propagators.Inject(ctx, &metadataSupplier{ ++ metadata: md, ++ }) ++} ++ ++func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { ++ md, ok := metadata.FromOutgoingContext(ctx) ++ if !ok { ++ md = metadata.MD{} ++ } ++ propagators.Inject(ctx, &metadataSupplier{ ++ metadata: &md, ++ }) ++ return metadata.NewOutgoingContext(ctx, md) ++} ++ ++// Extract returns the correlation context and span context that ++// another service encoded in the gRPC metadata object with Inject. ++// This function is meant to be used on incoming requests. ++// Deprecated: Unnecessary public func. 
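// Hedged illustrative sketch, not part of the vendored patch: manual context
// propagation with the exported Inject/Extract helpers from the new
// metadata_supplier.go. Both are marked Deprecated in this version, but they
// illustrate what the unexported inject/extract paths do with outgoing and
// incoming gRPC metadata.
package example

import (
	"context"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc/metadata"
)

// attachTraceContext copies the current span context into outgoing metadata,
// roughly what the client interceptors and stats handler do before each RPC.
func attachTraceContext(ctx context.Context) context.Context {
	md, ok := metadata.FromOutgoingContext(ctx)
	if !ok {
		md = metadata.MD{}
	}
	otelgrpc.Inject(ctx, &md) //nolint:staticcheck // deprecated helper, used for illustration
	return metadata.NewOutgoingContext(ctx, md)
}

// readTraceContext recovers the remote span context and baggage that a peer
// encoded with Inject, mirroring the server-side extract path.
func readTraceContext(ctx context.Context, md metadata.MD) (baggage.Baggage, trace.SpanContext) {
	return otelgrpc.Extract(ctx, &md) //nolint:staticcheck // deprecated helper, used for illustration
}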
++func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { ++ c := newConfig(opts, "") ++ ctx = c.Propagators.Extract(ctx, &metadataSupplier{ ++ metadata: md, ++ }) ++ ++ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) ++} ++ ++func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { ++ md, ok := metadata.FromIncomingContext(ctx) ++ if !ok { ++ md = metadata.MD{} ++ } ++ ++ return propagators.Extract(ctx, &metadataSupplier{ ++ metadata: &md, ++ }) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +index 611c7f3017a..b65fab308f3 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +@@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g + + import ( + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + ) + + // Semantic conventions for attribute keys for gRPC. +@@ -41,7 +41,7 @@ const ( + // Semantic conventions for common RPC attributes. + var ( + // Semantic convention for gRPC as the remoting system. +- RPCSystemGRPC = semconv.RPCSystemKey.String("grpc") ++ RPCSystemGRPC = semconv.RPCSystemGRPC + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +new file mode 100644 +index 00000000000..0211e55e003 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +@@ -0,0 +1,235 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" ++ ++import ( ++ "context" ++ "sync/atomic" ++ "time" ++ ++ grpc_codes "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/stats" ++ "google.golang.org/grpc/status" ++ ++ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/metric" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++ "go.opentelemetry.io/otel/trace" ++) ++ ++type gRPCContextKey struct{} ++ ++type gRPCContext struct { ++ messagesReceived int64 ++ messagesSent int64 ++ metricAttrs []attribute.KeyValue ++} ++ ++type serverHandler struct { ++ *config ++} ++ ++// NewServerHandler creates a stats.Handler for gRPC server. 
++func NewServerHandler(opts ...Option) stats.Handler { ++ h := &serverHandler{ ++ config: newConfig(opts, "server"), ++ } ++ ++ return h ++} ++ ++// TagConn can attach some information to the given context. ++func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { ++ span := trace.SpanFromContext(ctx) ++ attrs := peerAttr(peerFromCtx(ctx)) ++ span.SetAttributes(attrs...) ++ return ctx ++} ++ ++// HandleConn processes the Conn stats. ++func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { ++} ++ ++// TagRPC can attach some information to the given context. ++func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { ++ ctx = extract(ctx, h.config.Propagators) ++ ++ name, attrs := internal.ParseFullMethod(info.FullMethodName) ++ attrs = append(attrs, RPCSystemGRPC) ++ ctx, _ = h.tracer.Start( ++ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), ++ name, ++ trace.WithSpanKind(trace.SpanKindServer), ++ trace.WithAttributes(attrs...), ++ ) ++ ++ gctx := gRPCContext{ ++ metricAttrs: attrs, ++ } ++ return context.WithValue(ctx, gRPCContextKey{}, &gctx) ++} ++ ++// HandleRPC processes the RPC stats. ++func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { ++ h.handleRPC(ctx, rs) ++} ++ ++type clientHandler struct { ++ *config ++} ++ ++// NewClientHandler creates a stats.Handler for gRPC client. ++func NewClientHandler(opts ...Option) stats.Handler { ++ h := &clientHandler{ ++ config: newConfig(opts, "client"), ++ } ++ ++ return h ++} ++ ++// TagRPC can attach some information to the given context. ++func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { ++ name, attrs := internal.ParseFullMethod(info.FullMethodName) ++ attrs = append(attrs, RPCSystemGRPC) ++ ctx, _ = h.tracer.Start( ++ ctx, ++ name, ++ trace.WithSpanKind(trace.SpanKindClient), ++ trace.WithAttributes(attrs...), ++ ) ++ ++ gctx := gRPCContext{ ++ metricAttrs: attrs, ++ } ++ ++ return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) ++} ++ ++// HandleRPC processes the RPC stats. ++func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { ++ h.handleRPC(ctx, rs) ++} ++ ++// TagConn can attach some information to the given context. ++func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { ++ span := trace.SpanFromContext(ctx) ++ attrs := peerAttr(cti.RemoteAddr.String()) ++ span.SetAttributes(attrs...) ++ return ctx ++} ++ ++// HandleConn processes the Conn stats. ++func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { ++ // no-op ++} ++ ++func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { ++ span := trace.SpanFromContext(ctx) ++ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) ++ var messageId int64 ++ metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) ++ metricAttrs = append(metricAttrs, gctx.metricAttrs...) 
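// Hedged illustrative sketch, not part of the vendored patch: the stats
// handlers above take their tracer and meter from the config built in
// newConfig, so non-global providers can be supplied explicitly. The provider
// arguments are assumed to come from the caller's OpenTelemetry SDK setup.
package example

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc"
)

// newServerWithProviders instruments a gRPC server against explicit providers
// instead of the otel globals, e.g. for tests or multi-tenant setups.
func newServerWithProviders(tp trace.TracerProvider, mp metric.MeterProvider) *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler(
			otelgrpc.WithTracerProvider(tp),
			otelgrpc.WithMeterProvider(mp),
		)),
	)
}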
++ wctx := withoutCancel(ctx) ++ ++ switch rs := rs.(type) { ++ case *stats.Begin: ++ case *stats.InPayload: ++ if gctx != nil { ++ messageId = atomic.AddInt64(&gctx.messagesReceived, 1) ++ c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) ++ } ++ ++ if c.ReceivedEvent { ++ span.AddEvent("message", ++ trace.WithAttributes( ++ semconv.MessageTypeReceived, ++ semconv.MessageIDKey.Int64(messageId), ++ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), ++ semconv.MessageUncompressedSizeKey.Int(rs.Length), ++ ), ++ ) ++ } ++ case *stats.OutPayload: ++ if gctx != nil { ++ messageId = atomic.AddInt64(&gctx.messagesSent, 1) ++ c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) ++ } ++ ++ if c.SentEvent { ++ span.AddEvent("message", ++ trace.WithAttributes( ++ semconv.MessageTypeSent, ++ semconv.MessageIDKey.Int64(messageId), ++ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), ++ semconv.MessageUncompressedSizeKey.Int(rs.Length), ++ ), ++ ) ++ } ++ case *stats.OutTrailer: ++ case *stats.End: ++ var rpcStatusAttr attribute.KeyValue ++ ++ if rs.Error != nil { ++ s, _ := status.FromError(rs.Error) ++ span.SetStatus(codes.Error, s.Message()) ++ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) ++ } else { ++ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) ++ } ++ span.SetAttributes(rpcStatusAttr) ++ span.End() ++ ++ metricAttrs = append(metricAttrs, rpcStatusAttr) ++ c.rpcDuration.Record(wctx, float64(rs.EndTime.Sub(rs.BeginTime)), metric.WithAttributes(metricAttrs...)) ++ c.rpcRequestsPerRPC.Record(wctx, gctx.messagesReceived, metric.WithAttributes(metricAttrs...)) ++ c.rpcResponsesPerRPC.Record(wctx, gctx.messagesSent, metric.WithAttributes(metricAttrs...)) ++ ++ default: ++ return ++ } ++} ++ ++func withoutCancel(parent context.Context) context.Context { ++ if parent == nil { ++ panic("cannot create context from nil parent") ++ } ++ return withoutCancelCtx{parent} ++} ++ ++type withoutCancelCtx struct { ++ c context.Context ++} ++ ++func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) { ++ return ++} ++ ++func (withoutCancelCtx) Done() <-chan struct{} { ++ return nil ++} ++ ++func (withoutCancelCtx) Err() error { ++ return nil ++} ++ ++func (w withoutCancelCtx) Value(key any) any { ++ return w.c.Value(key) ++} ++ ++func (w withoutCancelCtx) String() string { ++ return "withoutCancel" ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +index bf6b2aa1c11..5c13a7ceab4 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +@@ -16,11 +16,13 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g + + // Version is the current release version of the gRPC instrumentation. + func Version() string { +- return "0.35.0" ++ return "0.46.0" + // This string is updated by the pre_release.sh script during release + } + + // SemVersion is the semantic version to be supplied to tracer/meter creation. ++// ++// Deprecated: Use [Version] instead. 
+ func SemVersion() string { +- return "semver:" + Version() ++ return Version() + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +index 728be09d0e0..303e5505e41 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +@@ -42,5 +42,5 @@ const ( + type Filter func(*http.Request) bool + + func newTracer(tp trace.TracerProvider) trace.Tracer { +- return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion())) ++ return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +index d0337f3a5e4..e4fa1b8d9d6 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +@@ -21,7 +21,6 @@ import ( + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + ) +@@ -33,6 +32,7 @@ const ( + // config represents the configuration options available for the http.Handler + // and http.Transport types. + type config struct { ++ ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator +@@ -64,7 +64,7 @@ func (o optionFunc) apply(c *config) { + func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), +- MeterProvider: global.MeterProvider(), ++ MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) +@@ -77,7 +77,7 @@ func newConfig(opts ...Option) *config { + + c.Meter = c.MeterProvider.Meter( + instrumentationName, +- metric.WithInstrumentationVersion(SemVersion()), ++ metric.WithInstrumentationVersion(Version()), + ) + + return c +@@ -198,3 +198,11 @@ func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + c.ClientTrace = f + }) + } ++ ++// WithServerName returns an Option that sets the name of the (virtual) server ++// handling requests. 
++func WithServerName(server string) Option { ++ return optionFunc(func(c *config) { ++ c.ServerName = server ++ }) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +index 5b7d9daafa7..b2fbe07841c 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +@@ -21,25 +21,19 @@ import ( + + "github.com/felixge/httpsnoop" + ++ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/propagation" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" + ) + +-var _ http.Handler = &Handler{} +- +-// Handler is http middleware that corresponds to the http.Handler interface and +-// is designed to wrap a http.Mux (or equivalent), while individual routes on +-// the mux are wrapped with WithRouteTag. A Handler will add various attributes +-// to the span using the attribute.Keys defined in this package. +-type Handler struct { ++// middleware is an http middleware which wraps the next handler in a span. ++type middleware struct { + operation string +- handler http.Handler ++ server string + + tracer trace.Tracer + meter metric.Meter +@@ -49,8 +43,8 @@ type Handler struct { + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string +- counters map[string]syncint64.Counter +- valueRecorders map[string]syncfloat64.Histogram ++ counters map[string]metric.Int64Counter ++ valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + } +@@ -59,11 +53,17 @@ func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation + } + +-// NewHandler wraps the passed handler, functioning like middleware, in a span +-// named after the operation and with any provided Options. ++// NewHandler wraps the passed handler in a span named after the operation and ++// enriches it with metrics. + func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { +- h := Handler{ +- handler: handler, ++ return NewMiddleware(operation, opts...)(handler) ++} ++ ++// NewMiddleware returns a tracing and metrics instrumentation middleware. ++// The handler returned by the middleware wraps a handler ++// in a span named after the operation and enriches it with metrics. 
++func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { ++ h := middleware{ + operation: operation, + } + +@@ -76,10 +76,14 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han + h.configure(c) + h.createMeasures() + +- return &h ++ return func(next http.Handler) http.Handler { ++ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ++ h.serveHTTP(w, r, next) ++ }) ++ } + } + +-func (h *Handler) configure(c *config) { ++func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators +@@ -90,6 +94,7 @@ func (h *Handler) configure(c *config) { + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn ++ h.server = c.ServerName + } + + func handleErr(err error) { +@@ -98,17 +103,17 @@ func handleErr(err error) { + } + } + +-func (h *Handler) createMeasures() { +- h.counters = make(map[string]syncint64.Counter) +- h.valueRecorders = make(map[string]syncfloat64.Histogram) ++func (h *middleware) createMeasures() { ++ h.counters = make(map[string]metric.Int64Counter) ++ h.valueRecorders = make(map[string]metric.Float64Histogram) + +- requestBytesCounter, err := h.meter.SyncInt64().Counter(RequestContentLength) ++ requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + handleErr(err) + +- responseBytesCounter, err := h.meter.SyncInt64().Counter(ResponseContentLength) ++ responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + handleErr(err) + +- serverLatencyMeasure, err := h.meter.SyncFloat64().Histogram(ServerLatency) ++ serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter +@@ -116,19 +121,27 @@ func (h *Handler) createMeasures() { + h.valueRecorders[ServerLatency] = serverLatencyMeasure + } + +-// ServeHTTP serves HTTP requests (http.Handler). +-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ++// serveHTTP sets up tracing and calls the given next http.Handler with the span ++// context injected into the request context. ++func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request +- h.handler.ServeHTTP(w, r) ++ next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) +- opts := h.spanStartOptions ++ opts := []trace.SpanStartOption{ ++ trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), ++ } ++ if h.server != "" { ++ hostAttr := semconv.NetHostName(h.server) ++ opts = append(opts, trace.WithAttributes(hostAttr)) ++ } ++ opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. +@@ -137,12 +150,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- opts = append([]trace.SpanStartOption{ +- trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...), +- trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...), +- trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...), +- }, opts...) 
// start with the configured options +- + tracer := h.tracer + + if tracer == nil { +@@ -180,7 +187,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + } + } + +- rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} ++ rww := &respWriterWrapper{ ++ ResponseWriter: w, ++ record: writeRecordFunc, ++ ctx: ctx, ++ props: h.propagators, ++ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything ++ } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, +@@ -201,19 +214,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + +- h.handler.ServeHTTP(w, r.WithContext(ctx)) ++ next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics +- attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...) +- h.counters[RequestContentLength].Add(ctx, bw.read, attributes...) +- h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...) ++ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) ++ if rww.statusCode > 0 { ++ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) ++ } ++ o := metric.WithAttributes(attributes...) ++ h.counters[RequestContentLength].Add(ctx, bw.read, o) ++ h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + +- h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...) ++ h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) + } + + func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { +@@ -231,21 +248,28 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { +- attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) +- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)) ++ attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } ++ span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) ++ + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) + } + +-// WithRouteTag annotates a span with the provided route name using the +-// RouteKey Tag. ++// WithRouteTag annotates spans and metrics with the provided route name ++// with HTTP route attribute. 
+ func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ++ attr := semconv.HTTPRouteKey.String(route) ++ + span := trace.SpanFromContext(r.Context()) +- span.SetAttributes(semconv.HTTPRouteKey.String(route)) ++ span.SetAttributes(attr) ++ ++ labeler, _ := LabelerFromContext(r.Context()) ++ labeler.Add(attr) ++ + h.ServeHTTP(w, r) + }) + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +new file mode 100644 +index 00000000000..edf4ce3d315 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +@@ -0,0 +1,21 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++// Generate semconvutil package: ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go ++//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +new file mode 100644 +index 00000000000..d3dede9ebbd +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +@@ -0,0 +1,552 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/semconvutil/httpconv.go.tmpl ++ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
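// Hedged illustrative sketch, not part of the vendored patch: using the
// otelhttp handler API reworked in the hunks above. NewMiddleware and
// WithServerName are the additions from this version; the route and server
// name below are placeholder assumptions.
package example

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func newInstrumentedMux() http.Handler {
	mux := http.NewServeMux()
	// WithRouteTag sets http.route on the span and, after this change, also on
	// the metric attributes via the Labeler.
	mux.Handle("/healthz", otelhttp.WithRouteTag("/healthz", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) },
	)))

	// NewHandler is now a thin wrapper over NewMiddleware(operation, opts...).
	return otelhttp.NewHandler(mux, "server",
		otelhttp.WithServerName("example.internal"), // placeholder virtual host name
	)
}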
++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++import ( ++ "fmt" ++ "net/http" ++ "strings" ++ ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++) ++ ++// HTTPClientResponse returns trace attributes for an HTTP response received by a ++// client from a server. It will return the following attributes if the related ++// values are defined in resp: "http.status.code", ++// "http.response_content_length". ++// ++// This does not add all OpenTelemetry required attributes for an HTTP event, ++// it assumes ClientRequest was used to create the span with a complete set of ++// attributes. If a complete set of attributes can be generated using the ++// request contained in resp. For example: ++// ++// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) ++func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { ++ return hc.ClientResponse(resp) ++} ++ ++// HTTPClientRequest returns trace attributes for an HTTP request made by a client. ++// The following attributes are always returned: "http.url", "http.flavor", ++// "http.method", "net.peer.name". The following attributes are returned if the ++// related values are defined in req: "net.peer.port", "http.user_agent", ++// "http.request_content_length", "enduser.id". ++func HTTPClientRequest(req *http.Request) []attribute.KeyValue { ++ return hc.ClientRequest(req) ++} ++ ++// HTTPClientStatus returns a span status code and message for an HTTP status code ++// value received by a client. ++func HTTPClientStatus(code int) (codes.Code, string) { ++ return hc.ClientStatus(code) ++} ++ ++// HTTPServerRequest returns trace attributes for an HTTP request received by a ++// server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "http.target", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port", ++// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", ++// "http.client_ip". ++func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { ++ return hc.ServerRequest(server, req) ++} ++ ++// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a ++// server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. 
More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port". ++func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { ++ return hc.ServerRequestMetrics(server, req) ++} ++ ++// HTTPServerStatus returns a span status code and message for an HTTP status code ++// value returned by a server. Status codes in the 400-499 range are not ++// returned as errors. ++func HTTPServerStatus(code int) (codes.Code, string) { ++ return hc.ServerStatus(code) ++} ++ ++// HTTPRequestHeader returns the contents of h as attributes. ++// ++// Instrumentation should require an explicit configuration of which headers to ++// captured and then prune what they pass here. Including all headers can be a ++// security risk - explicit configuration helps avoid leaking sensitive ++// information. ++// ++// The User-Agent header is already captured in the http.user_agent attribute ++// from ClientRequest and ServerRequest. Instrumentation may provide an option ++// to capture that header here even though it is not recommended. Otherwise, ++// instrumentation should filter that out of what is passed. ++func HTTPRequestHeader(h http.Header) []attribute.KeyValue { ++ return hc.RequestHeader(h) ++} ++ ++// HTTPResponseHeader returns the contents of h as attributes. ++// ++// Instrumentation should require an explicit configuration of which headers to ++// captured and then prune what they pass here. Including all headers can be a ++// security risk - explicit configuration helps avoid leaking sensitive ++// information. ++// ++// The User-Agent header is already captured in the http.user_agent attribute ++// from ClientRequest and ServerRequest. Instrumentation may provide an option ++// to capture that header here even though it is not recommended. Otherwise, ++// instrumentation should filter that out of what is passed. ++func HTTPResponseHeader(h http.Header) []attribute.KeyValue { ++ return hc.ResponseHeader(h) ++} ++ ++// httpConv are the HTTP semantic convention attributes defined for a version ++// of the OpenTelemetry specification. 
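Reviewer note: the helpers documented above live in an internal package of the otelhttp module, so nothing outside that module can import them directly; they are exercised through the public otelhttp wrappers that this patch also touches (see the transport.go hunk further down). A minimal, hedged sketch of how those public entry points are normally wired up, using only stable otelhttp API; the handler, operation name, and endpoint below are illustrative and not taken from this patch:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        // Server side: otelhttp.NewHandler records the server-span attributes
        // produced by the HTTPServerRequest / HTTPServerStatus helpers above.
        handler := otelhttp.NewHandler(
            http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                io.WriteString(w, "ok")
            }),
            "healthz", // operation name used for the server span
        )
        srv := httptest.NewServer(handler)
        defer srv.Close()

        // Client side: the patched otelhttp.Transport records the client-span
        // attributes produced by HTTPClientRequest / HTTPClientResponse.
        client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
        resp, err := client.Get(srv.URL + "/healthz")
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.StatusCode)
    }

Without a tracer provider registered the spans are no-ops, but these are the attribute code paths that this vendored update replaces.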
++type httpConv struct { ++ NetConv *netConv ++ ++ EnduserIDKey attribute.Key ++ HTTPClientIPKey attribute.Key ++ HTTPFlavorKey attribute.Key ++ HTTPMethodKey attribute.Key ++ HTTPRequestContentLengthKey attribute.Key ++ HTTPResponseContentLengthKey attribute.Key ++ HTTPRouteKey attribute.Key ++ HTTPSchemeHTTP attribute.KeyValue ++ HTTPSchemeHTTPS attribute.KeyValue ++ HTTPStatusCodeKey attribute.Key ++ HTTPTargetKey attribute.Key ++ HTTPURLKey attribute.Key ++ HTTPUserAgentKey attribute.Key ++} ++ ++var hc = &httpConv{ ++ NetConv: nc, ++ ++ EnduserIDKey: semconv.EnduserIDKey, ++ HTTPClientIPKey: semconv.HTTPClientIPKey, ++ HTTPFlavorKey: semconv.HTTPFlavorKey, ++ HTTPMethodKey: semconv.HTTPMethodKey, ++ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, ++ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, ++ HTTPRouteKey: semconv.HTTPRouteKey, ++ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, ++ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, ++ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, ++ HTTPTargetKey: semconv.HTTPTargetKey, ++ HTTPURLKey: semconv.HTTPURLKey, ++ HTTPUserAgentKey: semconv.HTTPUserAgentKey, ++} ++ ++// ClientResponse returns attributes for an HTTP response received by a client ++// from a server. The following attributes are returned if the related values ++// are defined in resp: "http.status.code", "http.response_content_length". ++// ++// This does not add all OpenTelemetry required attributes for an HTTP event, ++// it assumes ClientRequest was used to create the span with a complete set of ++// attributes. If a complete set of attributes can be generated using the ++// request contained in resp. For example: ++// ++// append(ClientResponse(resp), ClientRequest(resp.Request)...) ++func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { ++ var n int ++ if resp.StatusCode > 0 { ++ n++ ++ } ++ if resp.ContentLength > 0 { ++ n++ ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ if resp.StatusCode > 0 { ++ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) ++ } ++ if resp.ContentLength > 0 { ++ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) ++ } ++ return attrs ++} ++ ++// ClientRequest returns attributes for an HTTP request made by a client. The ++// following attributes are always returned: "http.url", "http.flavor", ++// "http.method", "net.peer.name". The following attributes are returned if the ++// related values are defined in req: "net.peer.port", "http.user_agent", ++// "http.request_content_length", "enduser.id". ++func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { ++ n := 3 // URL, peer name, proto, and method. ++ var h string ++ if req.URL != nil { ++ h = req.URL.Host ++ } ++ peer, p := firstHostPort(h, req.Header.Get("Host")) ++ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) ++ if port > 0 { ++ n++ ++ } ++ useragent := req.UserAgent() ++ if useragent != "" { ++ n++ ++ } ++ if req.ContentLength > 0 { ++ n++ ++ } ++ userID, _, hasUserID := req.BasicAuth() ++ if hasUserID { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.method(req.Method)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ ++ var u string ++ if req.URL != nil { ++ // Remove any username/password info that may be in the URL. ++ userinfo := req.URL.User ++ req.URL.User = nil ++ u = req.URL.String() ++ // Restore any username/password info that was removed. 
++ req.URL.User = userinfo ++ } ++ attrs = append(attrs, c.HTTPURLKey.String(u)) ++ ++ attrs = append(attrs, c.NetConv.PeerName(peer)) ++ if port > 0 { ++ attrs = append(attrs, c.NetConv.PeerPort(port)) ++ } ++ ++ if useragent != "" { ++ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) ++ } ++ ++ if l := req.ContentLength; l > 0 { ++ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) ++ } ++ ++ if hasUserID { ++ attrs = append(attrs, c.EnduserIDKey.String(userID)) ++ } ++ ++ return attrs ++} ++ ++// ServerRequest returns attributes for an HTTP request received by a server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "http.target", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port", ++// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", ++// "http.client_ip". ++func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { ++ // TODO: This currently does not add the specification required ++ // `http.target` attribute. It has too high of a cardinality to safely be ++ // added. An alternate should be added, or this comment removed, when it is ++ // addressed by the specification. If it is ultimately decided to continue ++ // not including the attribute, the HTTPTargetKey field of the httpConv ++ // should be removed as well. ++ ++ n := 4 // Method, scheme, proto, and host name. ++ var host string ++ var p int ++ if server == "" { ++ host, p = splitHostPort(req.Host) ++ } else { ++ // Prioritize the primary server name. ++ host, p = splitHostPort(server) ++ if p < 0 { ++ _, p = splitHostPort(req.Host) ++ } ++ } ++ hostPort := requiredHTTPPort(req.TLS != nil, p) ++ if hostPort > 0 { ++ n++ ++ } ++ peer, peerPort := splitHostPort(req.RemoteAddr) ++ if peer != "" { ++ n++ ++ if peerPort > 0 { ++ n++ ++ } ++ } ++ useragent := req.UserAgent() ++ if useragent != "" { ++ n++ ++ } ++ userID, _, hasUserID := req.BasicAuth() ++ if hasUserID { ++ n++ ++ } ++ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) ++ if clientIP != "" { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.method(req.Method)) ++ attrs = append(attrs, c.scheme(req.TLS != nil)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ attrs = append(attrs, c.NetConv.HostName(host)) ++ ++ if hostPort > 0 { ++ attrs = append(attrs, c.NetConv.HostPort(hostPort)) ++ } ++ ++ if peer != "" { ++ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a ++ // file-path that would be interpreted with a sock family. 
++ attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) ++ if peerPort > 0 { ++ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) ++ } ++ } ++ ++ if useragent != "" { ++ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) ++ } ++ ++ if hasUserID { ++ attrs = append(attrs, c.EnduserIDKey.String(userID)) ++ } ++ ++ if clientIP != "" { ++ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) ++ } ++ ++ return attrs ++} ++ ++// ServerRequestMetrics returns metric attributes for an HTTP request received ++// by a server. ++// ++// The server must be the primary server name if it is known. For example this ++// would be the ServerName directive ++// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache ++// server, and the server_name directive ++// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an ++// nginx server. More generically, the primary server name would be the host ++// header value that matches the default virtual host of an HTTP server. It ++// should include the host identifier and if a port is used to route to the ++// server that port identifier should be included as an appropriate port ++// suffix. ++// ++// If the primary server name is not known, server should be an empty string. ++// The req Host will be used to determine the server instead. ++// ++// The following attributes are always returned: "http.method", "http.scheme", ++// "http.flavor", "net.host.name". The following attributes are ++// returned if they related values are defined in req: "net.host.port". ++func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { ++ // TODO: This currently does not add the specification required ++ // `http.target` attribute. It has too high of a cardinality to safely be ++ // added. An alternate should be added, or this comment removed, when it is ++ // addressed by the specification. If it is ultimately decided to continue ++ // not including the attribute, the HTTPTargetKey field of the httpConv ++ // should be removed as well. ++ ++ n := 4 // Method, scheme, proto, and host name. ++ var host string ++ var p int ++ if server == "" { ++ host, p = splitHostPort(req.Host) ++ } else { ++ // Prioritize the primary server name. 
++ host, p = splitHostPort(server) ++ if p < 0 { ++ _, p = splitHostPort(req.Host) ++ } ++ } ++ hostPort := requiredHTTPPort(req.TLS != nil, p) ++ if hostPort > 0 { ++ n++ ++ } ++ attrs := make([]attribute.KeyValue, 0, n) ++ ++ attrs = append(attrs, c.methodMetric(req.Method)) ++ attrs = append(attrs, c.scheme(req.TLS != nil)) ++ attrs = append(attrs, c.flavor(req.Proto)) ++ attrs = append(attrs, c.NetConv.HostName(host)) ++ ++ if hostPort > 0 { ++ attrs = append(attrs, c.NetConv.HostPort(hostPort)) ++ } ++ ++ return attrs ++} ++ ++func (c *httpConv) method(method string) attribute.KeyValue { ++ if method == "" { ++ return c.HTTPMethodKey.String(http.MethodGet) ++ } ++ return c.HTTPMethodKey.String(method) ++} ++ ++func (c *httpConv) methodMetric(method string) attribute.KeyValue { ++ method = strings.ToUpper(method) ++ switch method { ++ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: ++ default: ++ method = "_OTHER" ++ } ++ return c.HTTPMethodKey.String(method) ++} ++ ++func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive ++ if https { ++ return c.HTTPSchemeHTTPS ++ } ++ return c.HTTPSchemeHTTP ++} ++ ++func (c *httpConv) flavor(proto string) attribute.KeyValue { ++ switch proto { ++ case "HTTP/1.0": ++ return c.HTTPFlavorKey.String("1.0") ++ case "HTTP/1.1": ++ return c.HTTPFlavorKey.String("1.1") ++ case "HTTP/2": ++ return c.HTTPFlavorKey.String("2.0") ++ case "HTTP/3": ++ return c.HTTPFlavorKey.String("3.0") ++ default: ++ return c.HTTPFlavorKey.String(proto) ++ } ++} ++ ++func serverClientIP(xForwardedFor string) string { ++ if idx := strings.Index(xForwardedFor, ","); idx >= 0 { ++ xForwardedFor = xForwardedFor[:idx] ++ } ++ return xForwardedFor ++} ++ ++func requiredHTTPPort(https bool, port int) int { // nolint:revive ++ if https { ++ if port > 0 && port != 443 { ++ return port ++ } ++ } else { ++ if port > 0 && port != 80 { ++ return port ++ } ++ } ++ return -1 ++} ++ ++// Return the request host and port from the first non-empty source. ++func firstHostPort(source ...string) (host string, port int) { ++ for _, hostport := range source { ++ host, port = splitHostPort(hostport) ++ if host != "" || port > 0 { ++ break ++ } ++ } ++ return ++} ++ ++// RequestHeader returns the contents of h as OpenTelemetry attributes. ++func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { ++ return c.header("http.request.header", h) ++} ++ ++// ResponseHeader returns the contents of h as OpenTelemetry attributes. ++func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { ++ return c.header("http.response.header", h) ++} ++ ++func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { ++ key := func(k string) attribute.Key { ++ k = strings.ToLower(k) ++ k = strings.ReplaceAll(k, "-", "_") ++ k = fmt.Sprintf("%s.%s", prefix, k) ++ return attribute.Key(k) ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, len(h)) ++ for k, v := range h { ++ attrs = append(attrs, key(k).StringSlice(v)) ++ } ++ return attrs ++} ++ ++// ClientStatus returns a span status code and message for an HTTP status code ++// value received by a client. 
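Reviewer note: the HTTPClientStatus / HTTPServerStatus pair documented above (their httpConv implementations follow just below) carries the one behavioral asymmetry worth keeping in mind while reviewing this file: a client span marks any 4xx or 5xx response as an error, while a server span only marks 5xx. A standalone sketch of that mapping, assuming only the public go.opentelemetry.io/otel/codes package; spanStatus is an illustrative name, not the vendored one:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/codes"
    )

    // spanStatus mirrors the ClientStatus/ServerStatus split in this file:
    // clients flag 4xx and 5xx as errors, servers flag only 5xx.
    func spanStatus(httpCode int, isServer bool) (codes.Code, string) {
        if httpCode < 100 || httpCode >= 600 {
            return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", httpCode)
        }
        errorFrom := 400
        if isServer {
            errorFrom = 500
        }
        if httpCode >= errorFrom {
            return codes.Error, ""
        }
        return codes.Unset, ""
    }

    func main() {
        for _, c := range []int{200, 404, 503} {
            clientCode, _ := spanStatus(c, false)
            serverCode, _ := spanStatus(c, true)
            fmt.Printf("%d -> client:%s server:%s\n", c, clientCode, serverCode)
        }
    }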
++func (c *httpConv) ClientStatus(code int) (codes.Code, string) { ++ if code < 100 || code >= 600 { ++ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) ++ } ++ if code >= 400 { ++ return codes.Error, "" ++ } ++ return codes.Unset, "" ++} ++ ++// ServerStatus returns a span status code and message for an HTTP status code ++// value returned by a server. Status codes in the 400-499 range are not ++// returned as errors. ++func (c *httpConv) ServerStatus(code int) (codes.Code, string) { ++ if code < 100 || code >= 600 { ++ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) ++ } ++ if code >= 500 { ++ return codes.Error, "" ++ } ++ return codes.Unset, "" ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +new file mode 100644 +index 00000000000..bde8893437d +--- /dev/null ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +@@ -0,0 +1,368 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/semconvutil/netconv.go.tmpl ++ ++// Copyright The OpenTelemetry Authors ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" ++ ++import ( ++ "net" ++ "strconv" ++ "strings" ++ ++ "go.opentelemetry.io/otel/attribute" ++ semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ++) ++ ++// NetTransport returns a trace attribute describing the transport protocol of the ++// passed network. See the net.Dial for information about acceptable network ++// values. ++func NetTransport(network string) attribute.KeyValue { ++ return nc.Transport(network) ++} ++ ++// NetClient returns trace attributes for a client network connection to address. ++// See net.Dial for information about acceptable address values, address should ++// be the same as the one used to create conn. If conn is nil, only network ++// peer attributes will be returned that describe address. Otherwise, the ++// socket level information about conn will also be included. ++func NetClient(address string, conn net.Conn) []attribute.KeyValue { ++ return nc.Client(address, conn) ++} ++ ++// NetServer returns trace attributes for a network listener listening at address. ++// See net.Listen for information about acceptable address values, address ++// should be the same as the one used to create ln. If ln is nil, only network ++// host attributes will be returned that describe address. Otherwise, the ++// socket level information about ln will also be included. ++func NetServer(address string, ln net.Listener) []attribute.KeyValue { ++ return nc.Server(address, ln) ++} ++ ++// netConv are the network semantic convention attributes defined for a version ++// of the OpenTelemetry specification. 
++type netConv struct { ++ NetHostNameKey attribute.Key ++ NetHostPortKey attribute.Key ++ NetPeerNameKey attribute.Key ++ NetPeerPortKey attribute.Key ++ NetSockFamilyKey attribute.Key ++ NetSockPeerAddrKey attribute.Key ++ NetSockPeerPortKey attribute.Key ++ NetSockHostAddrKey attribute.Key ++ NetSockHostPortKey attribute.Key ++ NetTransportOther attribute.KeyValue ++ NetTransportTCP attribute.KeyValue ++ NetTransportUDP attribute.KeyValue ++ NetTransportInProc attribute.KeyValue ++} ++ ++var nc = &netConv{ ++ NetHostNameKey: semconv.NetHostNameKey, ++ NetHostPortKey: semconv.NetHostPortKey, ++ NetPeerNameKey: semconv.NetPeerNameKey, ++ NetPeerPortKey: semconv.NetPeerPortKey, ++ NetSockFamilyKey: semconv.NetSockFamilyKey, ++ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, ++ NetSockPeerPortKey: semconv.NetSockPeerPortKey, ++ NetSockHostAddrKey: semconv.NetSockHostAddrKey, ++ NetSockHostPortKey: semconv.NetSockHostPortKey, ++ NetTransportOther: semconv.NetTransportOther, ++ NetTransportTCP: semconv.NetTransportTCP, ++ NetTransportUDP: semconv.NetTransportUDP, ++ NetTransportInProc: semconv.NetTransportInProc, ++} ++ ++func (c *netConv) Transport(network string) attribute.KeyValue { ++ switch network { ++ case "tcp", "tcp4", "tcp6": ++ return c.NetTransportTCP ++ case "udp", "udp4", "udp6": ++ return c.NetTransportUDP ++ case "unix", "unixgram", "unixpacket": ++ return c.NetTransportInProc ++ default: ++ // "ip:*", "ip4:*", and "ip6:*" all are considered other. ++ return c.NetTransportOther ++ } ++} ++ ++// Host returns attributes for a network host address. ++func (c *netConv) Host(address string) []attribute.KeyValue { ++ h, p := splitHostPort(address) ++ var n int ++ if h != "" { ++ n++ ++ if p > 0 { ++ n++ ++ } ++ } ++ ++ if n == 0 { ++ return nil ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ attrs = append(attrs, c.HostName(h)) ++ if p > 0 { ++ attrs = append(attrs, c.HostPort(int(p))) ++ } ++ return attrs ++} ++ ++// Server returns attributes for a network listener listening at address. See ++// net.Listen for information about acceptable address values, address should ++// be the same as the one used to create ln. If ln is nil, only network host ++// attributes will be returned that describe address. Otherwise, the socket ++// level information about ln will also be included. ++func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { ++ if ln == nil { ++ return c.Host(address) ++ } ++ ++ lAddr := ln.Addr() ++ if lAddr == nil { ++ return c.Host(address) ++ } ++ ++ hostName, hostPort := splitHostPort(address) ++ sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) ++ network := lAddr.Network() ++ sockFamily := family(network, sockHostAddr) ++ ++ n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) ++ n += positiveInt(hostPort, sockHostPort) ++ attr := make([]attribute.KeyValue, 0, n) ++ if hostName != "" { ++ attr = append(attr, c.HostName(hostName)) ++ if hostPort > 0 { ++ // Only if net.host.name is set should net.host.port be. ++ attr = append(attr, c.HostPort(hostPort)) ++ } ++ } ++ if network != "" { ++ attr = append(attr, c.Transport(network)) ++ } ++ if sockFamily != "" { ++ attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) ++ } ++ if sockHostAddr != "" { ++ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) ++ if sockHostPort > 0 { ++ // Only if net.sock.host.addr is set should net.sock.host.port be. 
++ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) ++ } ++ } ++ return attr ++} ++ ++func (c *netConv) HostName(name string) attribute.KeyValue { ++ return c.NetHostNameKey.String(name) ++} ++ ++func (c *netConv) HostPort(port int) attribute.KeyValue { ++ return c.NetHostPortKey.Int(port) ++} ++ ++// Client returns attributes for a client network connection to address. See ++// net.Dial for information about acceptable address values, address should be ++// the same as the one used to create conn. If conn is nil, only network peer ++// attributes will be returned that describe address. Otherwise, the socket ++// level information about conn will also be included. ++func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { ++ if conn == nil { ++ return c.Peer(address) ++ } ++ ++ lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() ++ ++ var network string ++ switch { ++ case lAddr != nil: ++ network = lAddr.Network() ++ case rAddr != nil: ++ network = rAddr.Network() ++ default: ++ return c.Peer(address) ++ } ++ ++ peerName, peerPort := splitHostPort(address) ++ var ( ++ sockFamily string ++ sockPeerAddr string ++ sockPeerPort int ++ sockHostAddr string ++ sockHostPort int ++ ) ++ ++ if lAddr != nil { ++ sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) ++ } ++ ++ if rAddr != nil { ++ sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) ++ } ++ ++ switch { ++ case sockHostAddr != "": ++ sockFamily = family(network, sockHostAddr) ++ case sockPeerAddr != "": ++ sockFamily = family(network, sockPeerAddr) ++ } ++ ++ n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) ++ n += positiveInt(peerPort, sockPeerPort, sockHostPort) ++ attr := make([]attribute.KeyValue, 0, n) ++ if peerName != "" { ++ attr = append(attr, c.PeerName(peerName)) ++ if peerPort > 0 { ++ // Only if net.peer.name is set should net.peer.port be. ++ attr = append(attr, c.PeerPort(peerPort)) ++ } ++ } ++ if network != "" { ++ attr = append(attr, c.Transport(network)) ++ } ++ if sockFamily != "" { ++ attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) ++ } ++ if sockPeerAddr != "" { ++ attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) ++ if sockPeerPort > 0 { ++ // Only if net.sock.peer.addr is set should net.sock.peer.port be. ++ attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) ++ } ++ } ++ if sockHostAddr != "" { ++ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) ++ if sockHostPort > 0 { ++ // Only if net.sock.host.addr is set should net.sock.host.port be. ++ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) ++ } ++ } ++ return attr ++} ++ ++func family(network, address string) string { ++ switch network { ++ case "unix", "unixgram", "unixpacket": ++ return "unix" ++ default: ++ if ip := net.ParseIP(address); ip != nil { ++ if ip.To4() == nil { ++ return "inet6" ++ } ++ return "inet" ++ } ++ } ++ return "" ++} ++ ++func nonZeroStr(strs ...string) int { ++ var n int ++ for _, str := range strs { ++ if str != "" { ++ n++ ++ } ++ } ++ return n ++} ++ ++func positiveInt(ints ...int) int { ++ var n int ++ for _, i := range ints { ++ if i > 0 { ++ n++ ++ } ++ } ++ return n ++} ++ ++// Peer returns attributes for a network peer address. 
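Reviewer note: Peer (immediately below), Client and Server all lean on the splitHostPort helper defined at the end of this file, whose contract is easy to misread in review: it reports an empty host and a port of -1 for anything missing or unparsable instead of returning an error. A rough standalone sketch of that contract built on the standard library; hostPort is an illustrative name and is similar in spirit to, not a drop-in for, the vendored helper:

    package main

    import (
        "fmt"
        "net"
        "strconv"
        "strings"
    )

    // hostPort returns (host, -1) when no usable port is present, echoing the
    // "negative port means not provided or unparsable" convention of the
    // vendored splitHostPort.
    func hostPort(hostport string) (string, int) {
        h, p, err := net.SplitHostPort(hostport)
        if err != nil {
            // Bare hosts such as "example.com" or "[::1]" carry no port.
            return strings.Trim(hostport, "[]"), -1
        }
        n, err := strconv.ParseUint(p, 10, 16)
        if err != nil {
            return h, -1
        }
        return h, int(n)
    }

    func main() {
        for _, in := range []string{"example.com", "example.com:8443", "[::1]:80", ":9090"} {
            h, p := hostPort(in)
            fmt.Printf("%-20q -> host=%q port=%d\n", in, h, p)
        }
    }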
++func (c *netConv) Peer(address string) []attribute.KeyValue { ++ h, p := splitHostPort(address) ++ var n int ++ if h != "" { ++ n++ ++ if p > 0 { ++ n++ ++ } ++ } ++ ++ if n == 0 { ++ return nil ++ } ++ ++ attrs := make([]attribute.KeyValue, 0, n) ++ attrs = append(attrs, c.PeerName(h)) ++ if p > 0 { ++ attrs = append(attrs, c.PeerPort(int(p))) ++ } ++ return attrs ++} ++ ++func (c *netConv) PeerName(name string) attribute.KeyValue { ++ return c.NetPeerNameKey.String(name) ++} ++ ++func (c *netConv) PeerPort(port int) attribute.KeyValue { ++ return c.NetPeerPortKey.Int(port) ++} ++ ++func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { ++ return c.NetSockPeerAddrKey.String(addr) ++} ++ ++func (c *netConv) SockPeerPort(port int) attribute.KeyValue { ++ return c.NetSockPeerPortKey.Int(port) ++} ++ ++// splitHostPort splits a network address hostport of the form "host", ++// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", ++// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and ++// port. ++// ++// An empty host is returned if it is not provided or unparsable. A negative ++// port is returned if it is not provided or unparsable. ++func splitHostPort(hostport string) (host string, port int) { ++ port = -1 ++ ++ if strings.HasPrefix(hostport, "[") { ++ addrEnd := strings.LastIndex(hostport, "]") ++ if addrEnd < 0 { ++ // Invalid hostport. ++ return ++ } ++ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { ++ host = hostport[1:addrEnd] ++ return ++ } ++ } else { ++ if i := strings.LastIndex(hostport, ":"); i < 0 { ++ host = hostport ++ return ++ } ++ } ++ ++ host, pStr, err := net.SplitHostPort(hostport) ++ if err != nil { ++ return ++ } ++ ++ p, err := strconv.ParseUint(pStr, 10, 16) ++ if err != nil { ++ return ++ } ++ return host, int(p) ++} +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +index fd5e1e9bc75..e835cac12e4 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +@@ -20,10 +20,10 @@ import ( + "net/http" + "net/http/httptrace" + ++ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + "go.opentelemetry.io/otel/trace" + ) + +@@ -109,8 +109,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + +- r = r.WithContext(ctx) +- span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...) ++ r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. ++ span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) +@@ -121,8 +121,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + return res, err + } + +- span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...) +- span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode)) ++ span.SetAttributes(semconvutil.HTTPClientResponse(res)...) 
++ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + res.Body = newWrappedBody(span, res.Body) + + return res, err +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +index 210ee0b787f..8f3f53a9588 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +@@ -16,11 +16,13 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http + + // Version is the current release version of the otelhttp instrumentation. + func Version() string { +- return "0.35.1" ++ return "0.44.0" + // This string is updated by the pre_release.sh script during release + } + + // SemVersion is the semantic version to be supplied to tracer/meter creation. ++// ++// Deprecated: Use [Version] instead. + func SemVersion() string { +- return "semver:" + Version() ++ return Version() + } +diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +index da6468c4e59..11a35ed167f 100644 +--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go ++++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +@@ -50,7 +50,7 @@ func (w *bodyWrapper) Close() error { + var _ http.ResponseWriter = &respWriterWrapper{} + + // respWriterWrapper wraps a http.ResponseWriter in order to track the number of +-// bytes written, the last error, and to catch the returned statusCode ++// bytes written, the last error, and to catch the first written statusCode. + // TODO: The wrapped http.ResponseWriter doesn't implement any of the optional + // types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) + // that may be useful when using it in real life situations. +@@ -85,11 +85,15 @@ func (w *respWriterWrapper) Write(p []byte) (int, error) { + return n, err + } + ++// WriteHeader persists initial statusCode for span attribution. ++// All calls to WriteHeader will be propagated to the underlying ResponseWriter ++// and will persist the statusCode from the first call. ++// Blocking consecutive calls to WriteHeader alters expected behavior and will ++// remove warning logs from net/http where developers will notice incorrect handler implementations. 
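Reviewer note: the wrap.go hunk that follows is easy to misjudge at a glance. Before this change the wrapper returned early on a repeated WriteHeader call, so later calls never reached net/http and its "superfluous response.WriteHeader" warning was silenced; after it, only the first status code is kept for the span but every call is still forwarded. A minimal standalone wrapper with the same record-first, forward-all behavior; statusRecorder and its fields are illustrative names, not the vendored respWriterWrapper:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // statusRecorder keeps the first status code written while still
    // forwarding every WriteHeader call to the wrapped ResponseWriter.
    type statusRecorder struct {
        http.ResponseWriter
        status      int
        wroteHeader bool
    }

    func (r *statusRecorder) WriteHeader(code int) {
        if !r.wroteHeader {
            r.wroteHeader = true
            r.status = code
        }
        r.ResponseWriter.WriteHeader(code) // always propagate, as the patched wrapper now does
    }

    func main() {
        rec := httptest.NewRecorder()
        w := &statusRecorder{ResponseWriter: rec}

        w.WriteHeader(http.StatusTeapot)              // first call: recorded as 418
        w.WriteHeader(http.StatusInternalServerError) // forwarded, but 418 stays recorded

        fmt.Println("recorded for the span:", w.status) // 418
        fmt.Println("seen by the recorder:", rec.Code)  // 418 (httptest also keeps the first)
    }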
+ func (w *respWriterWrapper) WriteHeader(statusCode int) { +- if w.wroteHeader { +- return ++ if !w.wroteHeader { ++ w.wroteHeader = true ++ w.statusCode = statusCode + } +- w.wroteHeader = true +- w.statusCode = statusCode + w.ResponseWriter.WriteHeader(statusCode) + } +diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore +new file mode 100644 +index 00000000000..ae6a3bcf12c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/.codespellignore +@@ -0,0 +1,5 @@ ++ot ++fo ++te ++collison ++consequentially +diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc +new file mode 100644 +index 00000000000..4afbb1fb3bd +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/.codespellrc +@@ -0,0 +1,10 @@ ++# https://github.com/codespell-project/codespell ++[codespell] ++builtin = clear,rare,informal ++check-filenames = ++check-hidden = ++ignore-words = .codespellignore ++interactive = 1 ++skip = .git,go.mod,go.sum,semconv,venv,.tools ++uri-ignore-words-list = * ++write = +diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore +index 0b605b3d67d..9248055655b 100644 +--- a/vendor/go.opentelemetry.io/otel/.gitignore ++++ b/vendor/go.opentelemetry.io/otel/.gitignore +@@ -2,20 +2,25 @@ + Thumbs.db + + .tools/ ++venv/ + .idea/ + .vscode/ + *.iml + *.so + coverage.* ++go.work ++go.work.sum + + gen/ + ++/example/dice/dice + /example/fib/fib + /example/fib/traces.txt + /example/jaeger/jaeger + /example/namedtracer/namedtracer ++/example/otel-collector/otel-collector + /example/opencensus/opencensus + /example/passthrough/passthrough + /example/prometheus/prometheus ++/example/view/view + /example/zipkin/zipkin +-/example/otel-collector/otel-collector +diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml +index 253e3b35b52..a62511f382e 100644 +--- a/vendor/go.opentelemetry.io/otel/.golangci.yml ++++ b/vendor/go.opentelemetry.io/otel/.golangci.yml +@@ -9,22 +9,20 @@ linters: + disable-all: true + # Specifically enable linters we want to use. + enable: +- - deadcode + - depguard + - errcheck + - godot +- - gofmt ++ - gofumpt + - goimports ++ - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck +- - structcheck + - typecheck + - unused +- - varcheck + + issues: + # Maximum issues count per one linter. +@@ -56,6 +54,20 @@ issues: + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive ++ # It's okay to not run gosec in a test. ++ - path: _test\.go ++ linters: ++ - gosec ++ # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) ++ # as we commonly use it in tests and examples. ++ - text: "G404:" ++ linters: ++ - gosec ++ # Igonoring gosec G402: TLS MinVersion too low ++ # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. ++ - text: "G402: TLS MinVersion too low." ++ linters: ++ - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 +@@ -64,30 +76,67 @@ issues: + + linters-settings: + depguard: +- # Check the list against standard lib. +- # Default: false +- include-go-root: true +- # A list of packages for the list type specified. +- # Default: [] +- packages: +- - "crypto/md5" +- - "crypto/sha1" +- - "crypto/**/pkix" +- ignore-file-rules: +- - "**/*_test.go" +- additional-guards: +- # Do not allow testing packages in non-test files. 
+- - list-type: denylist +- include-go-root: true +- packages: +- - testing +- - github.com/stretchr/testify +- ignore-file-rules: +- - "**/*_test.go" +- - "**/*test/*.go" +- - "**/internal/matchers/*.go" ++ rules: ++ non-tests: ++ files: ++ - "!$test" ++ - "!**/*test/*.go" ++ - "!**/internal/matchers/*.go" ++ deny: ++ - pkg: "testing" ++ - pkg: "github.com/stretchr/testify" ++ - pkg: "crypto/md5" ++ - pkg: "crypto/sha1" ++ - pkg: "crypto/**/pkix" ++ otlp-internal: ++ files: ++ - "!**/exporters/otlp/internal/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" ++ desc: Do not use cross-module internal packages. ++ otlptrace-internal: ++ files: ++ - "!**/exporters/otlp/otlptrace/*.go" ++ - "!**/exporters/otlp/otlptrace/internal/**.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" ++ desc: Do not use cross-module internal packages. ++ otlpmetric-internal: ++ files: ++ - "!**/exporters/otlp/otlpmetric/internal/*.go" ++ - "!**/exporters/otlp/otlpmetric/internal/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" ++ desc: Do not use cross-module internal packages. ++ otel-internal: ++ files: ++ - "**/sdk/*.go" ++ - "**/sdk/**/*.go" ++ - "**/exporters/*.go" ++ - "**/exporters/**/*.go" ++ - "**/schema/*.go" ++ - "**/schema/**/*.go" ++ - "**/metric/*.go" ++ - "**/metric/**/*.go" ++ - "**/bridge/*.go" ++ - "**/bridge/**/*.go" ++ - "**/example/*.go" ++ - "**/example/**/*.go" ++ - "**/trace/*.go" ++ - "**/trace/**/*.go" ++ deny: ++ - pkg: "go.opentelemetry.io/otel/internal$" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/attribute" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/internaltest" ++ desc: Do not use cross-module internal packages. ++ - pkg: "go.opentelemetry.io/otel/internal/matchers" ++ desc: Do not use cross-module internal packages. + godot: + exclude: ++ # Exclude links. ++ - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. +@@ -114,8 +163,9 @@ linters-settings: + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument ++ # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument +- disabled: false ++ disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type +diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore +index 545d634525d..40d62fa2eb8 100644 +--- a/vendor/go.opentelemetry.io/otel/.lycheeignore ++++ b/vendor/go.opentelemetry.io/otel/.lycheeignore +@@ -1,3 +1,6 @@ + http://localhost + http://jaeger-collector + https://github.com/open-telemetry/opentelemetry-go/milestone/ ++https://github.com/open-telemetry/opentelemetry-go/projects ++file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries ++file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md +index 906e17ce94f..c4e7ad475f5 100644 +--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md ++++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md +@@ -8,6 +8,820 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm + + ## [Unreleased] + ++## [1.20.0/0.43.0] 2023-11-10 ++ ++This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ++ ++### Added ++ ++- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) ++- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) ++- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) ++- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) ++- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) ++- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) ++- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) ++- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) ++- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) ++- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) ++- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) ++- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) ++- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) ++ ++### Deprecated ++ ++- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) ++- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) ++- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. 
++ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) ++- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) ++- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) ++ ++### Changed ++ ++- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) ++- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. ++ This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. ++ This extends the `Tracer` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. ++ This extends the `Span` interface and is is a breaking change for any existing implementation. ++ Implementors need to update their implementations based on what they want the default behavior of the interface to be. ++ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) ++- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) ++- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) ++- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) ++- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) ++- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) ++- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) ++- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) ++- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) ++ ++### Fixed ++ ++- Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. 
(#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699) ++- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699) ++- In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) ++- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) ++- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) ++ ++## [1.19.0/0.42.0/0.0.7] 2023-09-28 ++ ++This release contains the first stable release of the OpenTelemetry Go [metric SDK]. ++Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) ++- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) ++ ++### Changed ++ ++- Allow '/' characters in metric instrument names. (#4501) ++- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) ++- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) ++ ++### Fixed ++ ++- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ++ ++### Removed ++ ++- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) ++ ++## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 ++ ++This is a release candidate for the v1.19.0/v0.42.0 release. ++That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Changed ++ ++- Allow '/' characters in metric instrument names. (#4501) ++ ++### Fixed ++ ++- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ++ ++## [1.18.0/0.41.0/0.0.6] 2023-09-12 ++ ++This release drops the compatibility guarantee of [Go 1.19]. 
++ ++### Added ++ ++- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) ++- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) ++ ++### Changed ++ ++- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) ++ ++### Deprecated ++ ++- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). ++ The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) ++ ++### Removed ++ ++- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) ++- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) ++- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) ++- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) ++- Dropped guaranteed support for versions of Go less than 1.20. (#4481) ++ ++## [1.17.0/0.40.0/0.0.5] 2023-08-28 ++ ++### Added ++ ++- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) ++- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) ++- Add support for exponential histogram aggregations. ++ A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) ++- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) ++- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) ++- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) ++- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) ++- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) ++- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. ++ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) ++- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) ++- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381) ++- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374) ++- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) ++- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. 
(#4437) ++- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) ++- Support Go 1.21. (#4463) ++ ++### Changed ++ ++- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) ++- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) ++- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) ++- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) ++- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) ++- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) ++- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) ++- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) ++- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) ++- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) ++- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) ++- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) ++- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) ++- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) ++- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) ++- Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346) ++ ++### Removed ++ ++- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`. ++ Use the added `WithProducer` option instead. (#4346) ++- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`. ++ Notice that `PeriodicReader.ForceFlush` is still available. (#4375) ++ ++### Fixed ++ ++- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) ++- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) ++- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317) ++- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. 
(#4337) ++- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) ++- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) ++- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) ++- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349) ++- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) ++- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) ++- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846) ++- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395) ++- Do not append `_total` if the counter already has that suffix for the Prometheus exproter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373) ++- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) ++- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. ++ OpenTelemetry dropped support for Jaeger exporter in July 2023. ++ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` ++ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) ++- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. 
(#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. ++ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) ++ ++## [1.16.0/0.39.0] 2023-05-18 ++ ++This release contains the first stable release of the OpenTelemetry Go [metric API]. ++Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. ++ The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) ++- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. ++ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) ++- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) ++- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) ++- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) ++ ++### Changed ++ ++- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) ++- `MeterProvider` returns noop meters once it has been shutdown. (#4154) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. ++ Use `go.opentelemetry.io/otel/metric` instead. (#4055) ++ ++### Fixed ++ ++- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) ++ ++## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 ++ ++This is a release candidate for the v1.16.0/v0.39.0 release. ++That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) ++ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. ++ - Use `GetMeterProivder` for a global `metric.MeterProvider`. ++ - Use `SetMeterProivder` to set the global `metric.MeterProvider`. ++ ++### Changed ++ ++- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. ++ This stages the metric API to be released as a stable module. (#4038) ++ ++### Removed ++ ++- The `go.opentelemetry.io/otel/metric/global` package is removed. ++ Use `go.opentelemetry.io/otel` instead. (#4039) ++ ++## [1.15.1/0.38.1] 2023-05-02 ++ ++### Fixed ++ ++- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. 
(#4040, #4041) ++ ++## [1.15.0/0.38.0] 2023-04-27 ++ ++### Added ++ ++- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) ++- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) ++- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) ++- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) ++ - The `AddConfig` used to hold configuration for addition measurements ++ - `NewAddConfig` used to create a new `AddConfig` ++ - `AddOption` used to configure an `AddConfig` ++ - The `RecordConfig` used to hold configuration for recorded measurements ++ - `NewRecordConfig` used to create a new `RecordConfig` ++ - `RecordOption` used to configure a `RecordConfig` ++ - The `ObserveConfig` used to hold configuration for observed measurements ++ - `NewObserveConfig` used to create a new `ObserveConfig` ++ - `ObserveOption` used to configure an `ObserveConfig` ++- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. ++ They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) ++- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) ++- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) ++ ++### Changed ++ ++- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) ++- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. ++ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) ++- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) ++ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` ++- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) ++- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) ++- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) ++ - The `Int64Counter.Add` method now accepts `...AddOption` ++ - The `Float64Counter.Add` method now accepts `...AddOption` ++ - The `Int64UpDownCounter.Add` method now accepts `...AddOption` ++ - The `Float64UpDownCounter.Add` method now accepts `...AddOption` ++ - The `Int64Histogram.Record` method now accepts `...RecordOption` ++ - The `Float64Histogram.Record` method now accepts `...RecordOption` ++ - The `Int64Observer.Observe` method now accepts `...ObserveOption` ++ - The `Float64Observer.Observe` method now accepts `...ObserveOption` ++- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. 
(#3971) ++ - The `Observer.ObserveInt64` method now accepts `...ObserveOption` ++ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` ++- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) ++ ++### Fixed ++ ++- `TracerProvider` allows calling `Tracer()` while it's shutting down. ++ It used to deadlock. (#3924) ++- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) ++- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) ++- Automatically figure out the default aggregation with `aggregation.Default`. (#3967) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. ++ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) ++ ++## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 ++ ++This is a release candidate for the v1.15.0/v0.38.0 release. ++That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++### Added ++ ++- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) ++- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to sets all timestamps to zero. (#3828) ++- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. ++ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) ++- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) ++- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) ++- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) ++ ++### Changed ++ ++- Optimize memory allocation when creation a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) ++- Optimize memory allocation when creation new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) ++- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) ++- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844) ++- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) ++- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) ++- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) ++- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) ++- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) ++- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) ++ ++### Fixed ++ ++- `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. 
(#3845) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) ++- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) ++- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. ++ Use the added `float64` instrument configuration instead. (#3895) ++- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. ++ Use the added `int64` instrument configuration instead. (#3895) ++- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) ++ ++## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 ++ ++This is a release candidate for the v1.15.0/v0.38.0 release. ++That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. ++See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ++ ++This release drops the compatibility guarantee of [Go 1.18]. ++ ++### Added ++ ++- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) ++ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. ++ - Use `GetMeterProivder` for a global `metric.MeterProvider`. ++ - Use `SetMeterProivder` to set the global `metric.MeterProvider`. ++ ++### Changed ++ ++- Dropped compatibility testing for [Go 1.18]. ++ The project no longer guarantees support for this version of Go. (#3813) ++ ++### Fixed ++ ++- Handle empty environment variable as it they were not set. (#3764) ++- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) ++- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899) ++- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/metric/global` package is deprecated. ++ Use `go.opentelemetry.io/otel` instead. (#3818) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) ++ ++## [1.14.0/0.37.0/0.0.4] 2023-02-27 ++ ++This release is the last to support [Go 1.18]. ++The next release will require at least [Go 1.19]. ++ ++### Added ++ ++- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) ++- Support [Go 1.20]. (#3693) ++- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. ++ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) ++ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: ++ - `OtelScopeNameKey` -> `OTelScopeNameKey` ++ - `OtelScopeVersionKey` -> `OTelScopeVersionKey` ++ - `OtelLibraryNameKey` -> `OTelLibraryNameKey` ++ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` ++ - `OtelStatusCodeKey` -> `OTelStatusCodeKey` ++ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` ++ - `OtelStatusCodeOk` -> `OTelStatusCodeOk` ++ - `OtelStatusCodeError` -> `OTelStatusCodeError` ++ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: ++ - `OtelScopeName` -> `OTelScopeName` ++ - `OtelScopeVersion` -> `OTelScopeVersion` ++ - `OtelLibraryName` -> `OTelLibraryName` ++ - `OtelLibraryVersion` -> `OTelLibraryVersion` ++ - `OtelStatusDescription` -> `OTelStatusDescription` ++- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. ++ See the [README](./bridge/opentracing/README.md) for more information. (#3570) ++- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) ++- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) ++- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) ++ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. ++ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. ++ ++### Changed ++ ++- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) ++- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. ++ This change is made to enable memory reuse by SDK users. (#3732) ++- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) ++ ++### Fixed ++ ++- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) ++- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) ++- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) ++- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) ++- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) ++- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) ++- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. ++ Use the equivalent unit string instead. (#3776) ++ - Use `"1"` instead of `unit.Dimensionless` ++ - Use `"By"` instead of `unit.Bytes` ++ - Use `"ms"` instead of `unit.Milliseconds` ++ ++## [1.13.0/0.36.0] 2023-02-07 ++ ++### Added ++ ++- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. ++ These functions ensure semantic convention type correctness. (#3675) ++ ++### Fixed ++ ++- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. 
(#3687) ++ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` ++ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` ++ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` ++ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` ++ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) ++- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) ++- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) ++- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) ++ ++## [1.12.0/0.35.0] 2023-01-28 ++ ++### Added ++ ++- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. ++ This options is used to configure `int64` Observer callbacks during their creation. (#3507) ++- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. ++ This options is used to configure `float64` Observer callbacks during their creation. (#3507) ++- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. ++ These additions are used to enable external metric Producers. (#3524) ++- The `Callback` function type to `go.opentelemetry.io/otel/metric`. ++ This new named function type is registered with a `Meter`. (#3564) ++- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. ++ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) ++ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. ++ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. ++ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. 
++- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. ++ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) ++- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. ++ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) ++- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. ++ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) ++- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. ++ These instruments are use as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) ++ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` ++ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` ++ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` ++ - `Int64ObservableCounter` replaces the `asyncint64.Counter` ++ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` ++ - `Int64ObservableGauge` replaces the `asyncint64.Gauge` ++ - `Float64Counter` replaces the `syncfloat64.Counter` ++ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` ++ - `Float64Histogram` replaces the `syncfloat64.Histogram` ++ - `Int64Counter` replaces the `syncint64.Counter` ++ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` ++ - `Int64Histogram` replaces the `syncint64.Histogram` ++- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. ++ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) ++- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. ++ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) ++- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. ++ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) ++ ++### Changed ++ ++- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) ++- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507) ++ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. ++ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. ++ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. ++ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. ++- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. ++ This `Registration` can be used to unregister callbacks. (#3522) ++- Global error handler uses an atomic value instead of a mutex. (#3543) ++- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) ++- Global logger uses an atomic value instead of a mutex. 
(#3545) ++- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) ++- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. ++ This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) ++- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. ++ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) ++- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) ++- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) ++ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` ++ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` ++ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` ++ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` ++ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` ++ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` ++- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. ++ - The named `Callback` replaces the inline function parameter. (#3564) ++ - `Callback` is required to return an error. (#3576) ++ - `Callback` accepts the added `Observer` parameter added. ++ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) ++ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) ++- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. ++ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. ++ Instead it uses the `net.sock.peer` attributes. (#3581) ++- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) ++ ++### Fixed ++ ++- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) ++- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. ++ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) ++ ++### Deprecated ++ ++- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. ++ Use `NewMetricProducer` instead. (#3541) ++- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) ++- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. 
(#3575) ++- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) ++- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. ++ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) ++- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. ++ Use `NewTracerProvider` instead. (#3116) ++ ++### Removed ++ ++- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Int64ObservableCounter` ++ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` ++ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Float64ObservableCounter` ++ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` ++ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Int64Counter` ++ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` ++ - The `Histogram` method is replaced by `Meter.Int64Histogram` ++- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. ++ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) ++ - The `Counter` method is replaced by `Meter.Float64Counter` ++ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` ++ - The `Histogram` method is replaced by `Meter.Float64Histogram` ++ ++## [1.11.2/0.34.0] 2022-12-05 ++ ++### Added ++ ++- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. ++ This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) ++- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. ++ This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) ++- OTLP exporters now recognize: (#3363) ++ - `OTEL_EXPORTER_OTLP_INSECURE` ++ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` ++ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` ++ - `OTEL_EXPORTER_OTLP_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` ++ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` ++ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` ++ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` ++- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. ++ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. 
(#3459) ++- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. ++ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) ++- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) ++- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) ++ ++### Changed ++ ++- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. ++ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. ++ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) ++- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) ++- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) ++- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) ++- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) ++ ++### Fixed ++ ++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) ++- Remove comparable requirement for `Reader`s. (#3387) ++- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) ++- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) ++- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) ++- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) ++- Re-enabled Attribute Filters in the Metric SDK. (#3396) ++- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) ++- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) ++- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) ++- Prevent duplicate Prometheus description, unit, and type. (#3469) ++- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) ++ ++### Removed ++ ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) ++- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) ++ ++### Deprecated ++ ++- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. ++ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3476) ++ ++## [1.11.1/0.33.0] 2022-10-19 ++ ++### Added ++ ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. ++ By default, it will register with the default Prometheus registerer. ++ A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) ++- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) ++ ++### Changed ++ ++- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. ++ It will return an error if the exporter fails to register with Prometheus. (#3239) ++ ++### Fixed ++ ++- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) ++- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. ++ This fixes the implementation to be compliant with the W3C specification. (#3226) ++- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) ++- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) ++- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) ++- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) ++- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) ++- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. ++ Instead the exporter is defined as an "unchecked" collector for Prometheus. ++ This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) ++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) ++- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. ++ This can be disabled using the `WithoutUnits()` option added to that package. (#3352) ++ ++## [1.11.0/0.32.3] 2022-10-12 ++ ++### Added ++ ++- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) ++ ++### Changed ++ ++- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) ++- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. ++ This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) ++ ++## [0.32.2] Metric SDK (Alpha) - 2022-10-11 ++ ++### Added ++ ++- Added an example of using metric views to customize instruments. 
(#3177) ++- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) ++ ++### Changed ++ ++- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) ++- Update histogram default bounds to match the requirements of the latest specification. (#3222) ++- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) ++ ++### Fixed ++ ++- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) ++- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) ++- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) ++- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) ++- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) ++ ++## [0.32.1] Metric SDK (Alpha) - 2022-09-22 ++ ++### Changed ++ ++- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. ++ Invalid characters are replaced with `_`. (#3212) ++ ++### Added ++ ++- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) ++- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) ++ ++### Fixed ++ ++- Updated go.mods to point to valid versions of the sdk. (#3216) ++- Set the `MeterProvider` resource on all exported metric data. (#3218) ++ ++## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 ++ ++### Changed ++ ++- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. ++ Please see the package documentation for how the new SDK is initialized and configured. (#3175) ++- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) ++ ++### Removed ++ ++- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. ++ A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. 
(#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. ++ A replacement package that supports the new metric SDK will be added back in a future release. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) ++- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) ++- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) ++- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) ++ + ## [1.10.0] - 2022-09-09 + + ### Added +@@ -191,7 +1005,7 @@ Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be mod + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` +- ++ + If the provided environment variables are invalid (negative), the default values would be used. + - Rename the `gc` runtime name to `go` (#2560) + - Add resource container ID detection. (#2418) +@@ -452,7 +1266,7 @@ This release includes an API and SDK for the tracing signal that will comply wit + - Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) + - The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) + - Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) +-- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) ++- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) + - Fixed typos in resources.go. (#2201) + + ## [1.0.0-RC2] - 2021-07-26 +@@ -898,7 +1712,7 @@ with major version 0. + - `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) + - Added documentation about the project's versioning policy. (#1388) + - Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. 
(#1418) +-- Added codeql worfklow to GitHub Actions (#1428) ++- Added codeql workflow to GitHub Actions (#1428) + - Added Gosec workflow to GitHub Actions (#1429) + - Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) + - Add an OpenCensus exporter bridge. (#1444) +@@ -1741,7 +2555,7 @@ There is still a possibility of breaking changes. + + ### Fixed + +-- Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428) ++- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + + ## [0.2.1] - 2020-01-08 + +@@ -1907,7 +2721,27 @@ It contains api and sdk for trace and meter. + - CircleCI build CI manifest files. + - CODEOWNERS file to track owners of this project. + +-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD ++[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD ++[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 ++[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 ++[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 ++[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 ++[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 ++[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 ++[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 ++[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 ++[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 ++[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 ++[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 ++[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 ++[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 ++[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 ++[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 ++[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 ++[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 ++[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 ++[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 ++[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 + [1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 + [1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 + [1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +@@ -1959,3 +2793,11 @@ It contains api and sdk for trace and meter. 
+ [0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 + [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 + [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 ++ ++[Go 1.20]: https://go.dev/doc/go1.20 ++[Go 1.19]: https://go.dev/doc/go1.19 ++[Go 1.18]: https://go.dev/doc/go1.18 ++ ++[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric ++[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric ++[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace +diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS +index c4012ed6ca1..623740007d4 100644 +--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS ++++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS +@@ -12,6 +12,6 @@ + # https://help.github.com/en/articles/about-code-owners + # + +-* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu ++* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +-CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod ++CODEOWNERS @MrAlias @MadVikingGod @pellared +\ No newline at end of file +diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +index 9371a481ab1..a00dbca7b08 100644 +--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md ++++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +@@ -6,7 +6,7 @@ OpenTelemetry + repo for information on this and other language SIGs. + + See the [public meeting +-notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) ++notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) + for a summary description of past meetings. To request edit access, + join the meeting or get in touch on + [Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). +@@ -28,6 +28,11 @@ precommit` - the `precommit` target is the default). + The `precommit` target also fixes the formatting of the code and + checks the status of the go module files. + ++Additionally, there is a `codespell` target that checks for common ++typos in the code. It is not run by default, but you can run it ++manually with `make codespell`. It will set up a virtual environment ++in `venv` and install `codespell` there. ++ + If after running `make precommit` the output of `git status` contains + `nothing to commit, working tree clean` then it means that everything + is up-to-date and properly formatted. +@@ -94,38 +99,66 @@ request ID to the entry you added to `CHANGELOG.md`. + + ### How to Get PRs Merged + +-A PR is considered to be **ready to merge** when: +- +-* It has received two approvals from Collaborators/Maintainers (at +- different companies). This is not enforced through technical means +- and a PR may be **ready to merge** with a single approval if the change +- and its approach have been discussed and consensus reached. +-* Feedback has been addressed. +-* Any substantive changes to your PR will require that you clear any prior +- Approval reviews, this includes changes resulting from other feedback. Unless +- the approver explicitly stated that their approval will persist across +- changes it should be assumed that the PR needs their review again. Other +- project members (e.g. approvers, maintainers) can help with this if there are +- any questions or if you forget to clear reviews. 
+-* It has been open for review for at least one working day. This gives +- people reasonable time to review. +-* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for +- one day and may be merged with a single Maintainer's approval. +-* `CHANGELOG.md` has been updated to reflect what has been +- added, changed, removed, or fixed. +-* `README.md` has been updated if necessary. +-* Urgent fix can take exception as long as it has been actively +- communicated. +- +-Any Maintainer can merge the PR once it is **ready to merge**. ++A PR is considered **ready to merge** when: ++ ++* It has received two qualified approvals[^1]. ++ ++ This is not enforced through automation, but needs to be validated by the ++ maintainer merging. ++ * The qualified approvals need to be from [Approver]s/[Maintainer]s ++ affiliated with different companies. Two qualified approvals from ++ [Approver]s or [Maintainer]s affiliated with the same company counts as a ++ single qualified approval. ++ * PRs introducing changes that have already been discussed and consensus ++ reached only need one qualified approval. The discussion and resolution ++ needs to be linked to the PR. ++ * Trivial changes[^2] only need one qualified approval. ++ ++* All feedback has been addressed. ++ * All PR comments and suggestions are resolved. ++ * All GitHub Pull Request reviews with a status of "Request changes" have ++ been addressed. Another review by the objecting reviewer with a different ++ status can be submitted to clear the original review, or the review can be ++ dismissed by a [Maintainer] when the issues from the original review have ++ been addressed. ++ * Any comments or reviews that cannot be resolved between the PR author and ++ reviewers can be submitted to the community [Approver]s and [Maintainer]s ++ during the weekly SIG meeting. If consensus is reached among the ++ [Approver]s and [Maintainer]s during the SIG meeting the objections to the ++ PR may be dismissed or resolved or the PR closed by a [Maintainer]. ++ * Any substantive changes to the PR require existing Approval reviews be ++ cleared unless the approver explicitly states that their approval persists ++ across changes. This includes changes resulting from other feedback. ++ [Approver]s and [Maintainer]s can help in clearing reviews and they should ++ be consulted if there are any questions. ++ ++* The PR branch is up to date with the base branch it is merging into. ++ * To ensure this does not block the PR, it should be configured to allow ++ maintainers to update it. ++ ++* It has been open for review for at least one working day. This gives people ++ reasonable time to review. ++ * Trivial changes[^2] do not have to wait for one day and may be merged with ++ a single [Maintainer]'s approval. ++ ++* All required GitHub workflows have succeeded. ++* Urgent fix can take exception as long as it has been actively communicated ++ among [Maintainer]s. ++ ++Any [Maintainer] can merge the PR once the above criteria have been met. ++ ++[^1]: A qualified approval is a GitHub Pull Request review with "Approve" ++ status from an OpenTelemetry Go [Approver] or [Maintainer]. ++[^2]: Trivial changes include: typo corrections, cosmetic non-substantive ++ changes, documentation corrections or updates, dependency updates, etc. + + ## Design Choices + + As with other OpenTelemetry clients, opentelemetry-go follows the +-[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). 
++[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). + + It's especially valuable to read through the [library +-guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). ++guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). + + ### Focus on Capabilities, Not Structure Compliance + +@@ -146,23 +179,23 @@ For a deeper discussion, see + + ## Documentation + +-Each non-example Go Module should have its own `README.md` containing: ++Each (non-internal, non-test) package must be documented using ++[Go Doc Comments](https://go.dev/doc/comment), ++preferably in a `doc.go` file. ++ ++Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) ++instead of putting code snippets in Go doc comments. ++In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). + +-- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +-- Brief description. +-- Installation instructions (and requirements if applicable). +-- Hyperlink to an example. Depending on the component the example can be: +- - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). +- - A sample Go application with its own `README.md`, like [here](example/zipkin). +-- Additional documentation sections such us: +- - Configuration, +- - Contributing, +- - References. ++You can install and run a "local Go Doc site" in the following way: + +-[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. ++ ```sh ++ go install golang.org/x/pkgsite/cmd/pkgsite@latest ++ pkgsite ++ ``` + +-Moreover, it should be possible to navigate to any `README.md` from the +-root `README.md`. ++[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) ++is an example of a very well-documented package. + + ## Style Guide + +@@ -216,7 +249,7 @@ Meaning a `config` from one package should not be directly used by another. The + one exception is the API packages. The configs from the base API, eg. + `go.opentelemetry.io/otel/trace.TracerConfig` and + `go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +-by the SDK therefor it is expected that these are exported. ++by the SDK therefore it is expected that these are exported. + + When a config is exported we want to maintain forward and backward + compatibility, to achieve this no fields should be exported but should +@@ -234,12 +267,12 @@ func newConfig(options ...Option) config { + for _, option := range options { + config = option.apply(config) + } +- // Preform any validation here. ++ // Perform any validation here. + return config + } + ``` + +-If validation of the `config` options is also preformed this can return an ++If validation of the `config` options is also performed this can return an + error as well that is expected to be handled by the instantiation function + or propagated to the user. + +@@ -438,12 +471,37 @@ their parameters appropriately named. + #### Interface Stability + + All exported stable interfaces that include the following warning in their +-doumentation are allowed to be extended with additional methods. ++documentation are allowed to be extended with additional methods. + + > Warning: methods may be added to this interface in minor releases. + ++These interfaces are defined by the OpenTelemetry specification and will be ++updated as the specification evolves. ++ + Otherwise, stable interfaces MUST NOT be modified. 
+ ++#### How to Change Specification Interfaces ++ ++When an API change must be made, we will update the SDK with the new method one ++release before the API change. This will allow the SDK one version before the ++API change to work seamlessly with the new API. ++ ++If an incompatible version of the SDK is used with the new API the application ++will fail to compile. ++ ++#### How Not to Change Specification Interfaces ++ ++We have explored using a v2 of the API to change interfaces and found that there ++was no way to introduce a v2 and have it work seamlessly with the v1 of the API. ++Problems happened with libraries that upgraded to v2 when an application did not, ++and would not produce any telemetry. ++ ++More detail of the approaches considered and their limitations can be found in ++the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) ++issue. ++ ++#### How to Change Other Interfaces ++ + If new functionality is needed for an interface that cannot be changed it MUST + be added by including an additional interface. That added interface can be a + simple interface for the specific functionality that you want to add or it can +@@ -498,29 +556,65 @@ functionality should be added, each one will need their own super-set + interfaces and will duplicate the pattern. For this reason, the simple targeted + interface that defines the specific functionality should be preferred. + ++### Testing ++ ++The tests should never leak goroutines. ++ ++Use the term `ConcurrentSafe` in the test name when it aims to verify the ++absence of race conditions. ++ ++### Internal packages ++ ++The use of internal packages should be scoped to a single module. A sub-module ++should never import from a parent internal package. This creates a coupling ++between the two modules where a user can upgrade the parent without the child ++and if the internal package API has changed it will fail to upgrade[^3]. ++ ++There are two known exceptions to this rule: ++ ++- `go.opentelemetry.io/otel/internal/global` ++ - This package manages global state for all of opentelemetry-go. It needs to ++ be a single package in order to ensure the uniqueness of the global state. ++- `go.opentelemetry.io/otel/internal/baggage` ++ - This package provides values in a `context.Context` that need to be ++ recognized by `go.opentelemetry.io/otel/baggage` and ++ `go.opentelemetry.io/otel/bridge/opentracing` but remain private. ++ ++If you have duplicate code in multiple modules, make that code into a Go ++template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] ++to render the templates in the desired locations. See [#4404] for an example of ++this. 
++ ++[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 ++ + ## Approvers and Maintainers + +-Approvers: ++### Approvers + + - [Evan Torrie](https://github.com/evantorrie), Verizon Media +-- [Josh MacDonald](https://github.com/jmacd), LightStep + - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics + - [David Ashpole](https://github.com/dashpole), Google +-- [Robert PajÄ…k](https://github.com/pellared), Splunk + - [Chester Cheung](https://github.com/hanyuancheung), Tencent +-- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta ++- [Damien Mathieu](https://github.com/dmathieu), Elastic ++- [Anthony Mirabella](https://github.com/Aneurysm9), AWS + +-Maintainers: ++### Maintainers + + - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ++- [Robert PajÄ…k](https://github.com/pellared), Splunk + - [Tyler Yahn](https://github.com/MrAlias), Splunk + +-Emeritus: ++### Emeritus + + - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep ++- [Josh MacDonald](https://github.com/jmacd), LightStep + + ### Become an Approver or a Maintainer + + See the [community membership document in OpenTelemetry community + repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). ++ ++[Approver]: #approvers ++[Maintainer]: #maintainers ++[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl ++[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 +diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile +index 18ffaa33a99..35fc189961b 100644 +--- a/vendor/go.opentelemetry.io/otel/Makefile ++++ b/vendor/go.opentelemetry.io/otel/Makefile +@@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools + ALL_DOCS := $(shell find . -name '*.md' -type f | sort) + ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) + OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) ++ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + + GO = go + TIMEOUT = 60 +@@ -25,8 +25,8 @@ TIMEOUT = 60 + .DEFAULT_GOAL := precommit + + .PHONY: precommit ci +-precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default +-ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage ++precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default ++ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + + # Tools + +@@ -71,21 +71,78 @@ $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + GOJQ = $(TOOLS)/gojq + $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + ++GOTMPL = $(TOOLS)/gotmpl ++$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl ++ ++GORELEASE = $(TOOLS)/gorelease ++$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease ++ ++GOVULNCHECK = $(TOOLS)/govulncheck ++$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck ++ + .PHONY: tools +-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) ++tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +-# Build ++# Virtualized python tools via docker ++ ++# The directory where the virtual environment is created. ++VENVDIR := venv ++ ++# The directory where the python tools are installed. ++PYTOOLS := $(VENVDIR)/bin ++ ++# The pip executable in the virtual environment. ++PIP := $(PYTOOLS)/pip ++ ++# The directory in the docker image where the current directory is mounted. ++WORKDIR := /workdir + +-.PHONY: generate build ++# The python image to use for the virtual environment. ++PYTHONIMAGE := python:3.11.3-slim-bullseye + +-generate: $(OTEL_GO_MOD_DIRS:%=generate/%) +-generate/%: DIR=$* +-generate/%: | $(STRINGER) $(PORTO) ++# Run the python image with the current directory mounted. ++DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) ++ ++# Create a virtual environment for Python tools. ++$(PYTOOLS): ++# The `--upgrade` flag is needed to ensure that the virtual environment is ++# created with the latest pip version. ++ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" ++ ++# Install python packages into the virtual environment. ++$(PYTOOLS)/%: | $(PYTOOLS) ++ @$(DOCKERPY) $(PIP) install -r requirements.txt ++ ++CODESPELL = $(PYTOOLS)/codespell ++$(CODESPELL): PACKAGE=codespell ++ ++# Generate ++ ++.PHONY: generate ++generate: go-generate vanity-import-fix ++ ++.PHONY: go-generate ++go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) ++go-generate/%: DIR=$* ++go-generate/%: | $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ +- && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . ++ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... ++ ++.PHONY: vanity-import-fix ++vanity-import-fix: | $(PORTO) ++ @$(PORTO) --include-internal -w . ++ ++# Generate go.work file for local development. 
++.PHONY: go-work ++go-work: | $(CROSSLINK) ++ $(CROSSLINK) work --root=$(shell pwd) ++ ++# Build ++ ++.PHONY: build + +-build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) ++build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) + build/%: DIR=$* + build/%: + @echo "$(GO) build $(DIR)/..." \ +@@ -135,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + ++# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. ++BENCHMARK_TARGETS := sdk/trace ++.PHONY: benchmark ++benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) ++BENCHMARK_FILTER = . ++# You can override the filter for a particular directory by adding a rule here. ++benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample ++benchmark/%: ++ @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ ++ && cd $* \ ++ $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) ++ + .PHONY: golangci-lint golangci-lint-fix + golangci-lint-fix: ARGS=--fix + golangci-lint-fix: golangci-lint +@@ -156,30 +225,38 @@ go-mod-tidy/%: DIR=$* + go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ +- && $(GO) mod tidy -compat=1.17 ++ && $(GO) mod tidy -compat=1.20 + + .PHONY: lint-modules + lint-modules: go-mod-tidy + + .PHONY: lint +-lint: misspell lint-modules golangci-lint ++lint: misspell lint-modules golangci-lint govulncheck + + .PHONY: vanity-import-check + vanity-import-check: | $(PORTO) +- @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" +- +-.PHONY: vanity-import-fix +-vanity-import-fix: | $(PORTO) +- @$(PORTO) --include-internal -w . ++ @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + + .PHONY: misspell + misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + ++.PHONY: govulncheck ++govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) ++govulncheck/%: DIR=$* ++govulncheck/%: | $(GOVULNCHECK) ++ @echo "govulncheck ./... in $(DIR)" \ ++ && cd $(DIR) \ ++ && $(GOVULNCHECK) ./... ++ ++.PHONY: codespell ++codespell: | $(CODESPELL) ++ @$(DOCKERPY) $(CODESPELL) ++ + .PHONY: license-check + license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ +- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ ++ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ +@@ -189,7 +266,7 @@ license-check: + DEPENDABOT_CONFIG = .github/dependabot.yml + .PHONY: dependabot-check + dependabot-check: | $(DBOTCONF) +- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" ++ @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + + .PHONY: dependabot-generate + dependabot-generate: | $(DBOTCONF) +@@ -208,11 +285,22 @@ check-clean-work-tree: + SEMCONVPKG ?= "semconv/" + .PHONY: semconv-generate + semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +- @[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) +- @[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) +- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/trace" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" +- @$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/resource" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" +- @$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" ++ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) ++ [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ++ $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" ++ ++.PHONY: gorelease ++gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) ++gorelease/%: DIR=$* ++gorelease/%:| $(GORELEASE) ++ @echo "gorelease in $(DIR):" \ ++ && cd $(DIR) \ ++ && $(GORELEASE) \ ++ || echo "" + + .PHONY: prerelease + prerelease: | $(MULTIMOD) +@@ -224,3 +312,7 @@ COMMIT ?= "HEAD" + add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} ++ ++.PHONY: lint-markdown ++lint-markdown: ++ docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md +diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md +index 4aeecb8bfe7..2c5b0cc28ab 100644 +--- a/vendor/go.opentelemetry.io/otel/README.md ++++ b/vendor/go.opentelemetry.io/otel/README.md +@@ -11,22 +11,22 @@ It provides a set of APIs to directly measure performance and behavior of your s + + ## Project Status + +-| Signal | Status | Project | +-| ------- | ---------- | ------- | +-| Traces | Stable | N/A | +-| Metrics | Alpha | N/A | +-| Logs | Frozen [1] | N/A | ++| Signal | Status | ++|---------|------------| ++| Traces | Stable | ++| Metrics | Stable | ++| Logs | Design [1] | + +-- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. ++- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). + No Logs Pull Requests are currently being accepted. + +-Progress and status specific to this repository is tracked in our local ++Progress and status specific to this repository is tracked in our + [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) + and + [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + + Project versioning information and stability guarantees can be found in the +-[versioning documentation](./VERSIONING.md). ++[versioning documentation](VERSIONING.md). + + ### Compatibility + +@@ -49,22 +49,17 @@ stop ensuring compatibility with these versions in the following manner: + Currently, this project supports the following environments. + + | OS | Go Version | Architecture | +-| ------- | ---------- | ------------ | +-| Ubuntu | 1.19 | amd64 | +-| Ubuntu | 1.18 | amd64 | +-| Ubuntu | 1.17 | amd64 | +-| Ubuntu | 1.19 | 386 | +-| Ubuntu | 1.18 | 386 | +-| Ubuntu | 1.17 | 386 | +-| MacOS | 1.19 | amd64 | +-| MacOS | 1.18 | amd64 | +-| MacOS | 1.17 | amd64 | +-| Windows | 1.19 | amd64 | +-| Windows | 1.18 | amd64 | +-| Windows | 1.17 | amd64 | +-| Windows | 1.19 | 386 | +-| Windows | 1.18 | 386 | +-| Windows | 1.17 | 386 | ++|---------|------------|--------------| ++| Ubuntu | 1.21 | amd64 | ++| Ubuntu | 1.20 | amd64 | ++| Ubuntu | 1.21 | 386 | ++| Ubuntu | 1.20 | 386 | ++| MacOS | 1.21 | amd64 | ++| MacOS | 1.20 | amd64 | ++| Windows | 1.21 | amd64 | ++| Windows | 1.20 | amd64 | ++| Windows | 1.21 | 386 | ++| Windows | 1.20 | 386 | + + While this project should work for other systems, no compatibility guarantees + are made for those systems currently. +@@ -102,12 +97,11 @@ export pipeline to send that telemetry to an observability platform. + All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
+ + | Exporter | Metrics | Traces | +-| :-----------------------------------: | :-----: | :----: | +-| [Jaeger](./exporters/jaeger/) | | ✓ | +-| [OTLP](./exporters/otlp/) | ✓ | ✓ | +-| [Prometheus](./exporters/prometheus/) | ✓ | | +-| [stdout](./exporters/stdout/) | ✓ | ✓ | +-| [Zipkin](./exporters/zipkin/) | | ✓ | ++|---------------------------------------|:-------:|:------:| ++| [OTLP](./exporters/otlp/) | ✓ | ✓ | ++| [Prometheus](./exporters/prometheus/) | ✓ | | ++| [stdout](./exporters/stdout/) | ✓ | ✓ | ++| [Zipkin](./exporters/zipkin/) | | ✓ | + + ## Contributing + +diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md +index 71e57625479..82ce3ee46a1 100644 +--- a/vendor/go.opentelemetry.io/otel/RELEASING.md ++++ b/vendor/go.opentelemetry.io/otel/RELEASING.md +@@ -2,24 +2,31 @@ + + ## Semantic Convention Generation + +-New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. ++New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. + The `semconv-generate` make target is used for this. + +-1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +-2. Run the `make semconv-generate ...` target from this repository. ++1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. ++2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` ++3. Run the `make semconv-generate ...` target from this repository. + + For example, + + ```sh +-export TAG="v1.7.0" # Change to the release version you are generating. +-export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +-git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" +-make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. ++export TAG="v1.21.0" # Change to the release version you are generating. ++export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" ++docker pull otel/semconvgen:latest ++make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. + ``` + + This should create a new sub-package of [`semconv`](./semconv). + Ensure things look correct before submitting a pull request to include the addition. + ++## Breaking changes validation ++ ++You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. ++ ++You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). ++ + ## Pre-Release + + First, decide which module sets will be released and update their versions +@@ -116,7 +123,17 @@ Once verified be sure to [make a release for the `contrib` repository](https://g + + ### Website Documentation + +-Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). ++Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go]. + Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. 
+ +-[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification ++[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions ++[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/ ++[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go ++ ++### Demo Repository ++ ++Bump the dependencies in the following Go services: ++ ++- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) ++- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) ++- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go +new file mode 100644 +index 00000000000..638c213d59a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go +@@ -0,0 +1,60 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package attribute // import "go.opentelemetry.io/otel/attribute" ++ ++// Filter supports removing certain attributes from attribute sets. When ++// the filter returns true, the attribute will be kept in the filtered ++// attribute set. When the filter returns false, the attribute is excluded ++// from the filtered attribute set, and the attribute instead appears in ++// the removed list of excluded attributes. ++type Filter func(KeyValue) bool ++ ++// NewAllowKeysFilter returns a Filter that only allows attributes with one of ++// the provided keys. ++// ++// If keys is empty a deny-all filter is returned. ++func NewAllowKeysFilter(keys ...Key) Filter { ++ if len(keys) <= 0 { ++ return func(kv KeyValue) bool { return false } ++ } ++ ++ allowed := make(map[Key]struct{}) ++ for _, k := range keys { ++ allowed[k] = struct{}{} ++ } ++ return func(kv KeyValue) bool { ++ _, ok := allowed[kv.Key] ++ return ok ++ } ++} ++ ++// NewDenyKeysFilter returns a Filter that only allows attributes ++// that do not have one of the provided keys. ++// ++// If keys is empty an allow-all filter is returned. 
++func NewDenyKeysFilter(keys ...Key) Filter { ++ if len(keys) <= 0 { ++ return func(kv KeyValue) bool { return true } ++ } ++ ++ forbid := make(map[Key]struct{}) ++ for _, k := range keys { ++ forbid[k] = struct{}{} ++ } ++ return func(kv KeyValue) bool { ++ _, ok := forbid[kv.Key] ++ return !ok ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go +index 26be5983223..9f9303d4f15 100644 +--- a/vendor/go.opentelemetry.io/otel/attribute/set.go ++++ b/vendor/go.opentelemetry.io/otel/attribute/set.go +@@ -18,6 +18,7 @@ import ( + "encoding/json" + "reflect" + "sort" ++ "sync" + ) + + type ( +@@ -38,13 +39,6 @@ type ( + iface interface{} + } + +- // Filter supports removing certain attributes from attribute sets. When +- // the filter returns true, the attribute will be kept in the filtered +- // attribute set. When the filter returns false, the attribute is excluded +- // from the filtered attribute set, and the attribute instead appears in +- // the removed list of excluded attributes. +- Filter func(KeyValue) bool +- + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may +@@ -62,6 +56,12 @@ var ( + iface: [0]KeyValue{}, + }, + } ++ ++ // sortables is a pool of Sortables used to create Sets with a user does ++ // not provide one. ++ sortables = sync.Pool{ ++ New: func() interface{} { return new(Sortable) }, ++ } + ) + + // EmptySet returns a reference to a Set with no elements. +@@ -91,7 +91,7 @@ func (l *Set) Len() int { + + // Get returns the KeyValue at ordered position idx in this set. + func (l *Set) Get(idx int) (KeyValue, bool) { +- if l == nil { ++ if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() +@@ -107,7 +107,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) { + + // Value returns the value of a specified key in this set. + func (l *Set) Value(k Key) (Value, bool) { +- if l == nil { ++ if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() +@@ -191,7 +191,9 @@ func NewSet(kvs ...KeyValue) Set { + if len(kvs) == 0 { + return empty() + } +- s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) ++ srt := sortables.Get().(*Sortable) ++ s, _ := NewSetWithSortableFiltered(kvs, srt, nil) ++ sortables.Put(srt) + return s + } + +@@ -218,7 +220,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + if len(kvs) == 0 { + return empty(), nil + } +- return NewSetWithSortableFiltered(kvs, new(Sortable), filter) ++ srt := sortables.Get().(*Sortable) ++ s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) ++ sortables.Put(srt) ++ return s, filtered + } + + // NewSetWithSortableFiltered returns a new Set. 
+diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go +index 57899f682e7..cb21dd5c096 100644 +--- a/vendor/go.opentelemetry.io/otel/attribute/value.go ++++ b/vendor/go.opentelemetry.io/otel/attribute/value.go +@@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute" + import ( + "encoding/json" + "fmt" ++ "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" ++ "go.opentelemetry.io/otel/internal/attribute" + ) + + //go:generate stringer -type=Type +@@ -66,12 +68,7 @@ func BoolValue(v bool) Value { + + // BoolSliceValue creates a BOOLSLICE Value. + func BoolSliceValue(v []bool) Value { +- cp := make([]bool, len(v)) +- copy(cp, v) +- return Value{ +- vtype: BOOLSLICE, +- slice: &cp, +- } ++ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} + } + + // IntValue creates an INT64 Value. +@@ -81,13 +78,14 @@ func IntValue(v int) Value { + + // IntSliceValue creates an INTSLICE Value. + func IntSliceValue(v []int) Value { +- cp := make([]int64, 0, len(v)) +- for _, i := range v { +- cp = append(cp, int64(i)) ++ var int64Val int64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) ++ for i, val := range v { ++ cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, +- slice: &cp, ++ slice: cp.Elem().Interface(), + } + } + +@@ -101,12 +99,7 @@ func Int64Value(v int64) Value { + + // Int64SliceValue creates an INT64SLICE Value. + func Int64SliceValue(v []int64) Value { +- cp := make([]int64, len(v)) +- copy(cp, v) +- return Value{ +- vtype: INT64SLICE, +- slice: &cp, +- } ++ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} + } + + // Float64Value creates a FLOAT64 Value. +@@ -119,12 +112,7 @@ func Float64Value(v float64) Value { + + // Float64SliceValue creates a FLOAT64SLICE Value. + func Float64SliceValue(v []float64) Value { +- cp := make([]float64, len(v)) +- copy(cp, v) +- return Value{ +- vtype: FLOAT64SLICE, +- slice: &cp, +- } ++ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} + } + + // StringValue creates a STRING Value. +@@ -137,12 +125,7 @@ func StringValue(v string) Value { + + // StringSliceValue creates a STRINGSLICE Value. + func StringSliceValue(v []string) Value { +- cp := make([]string, len(v)) +- copy(cp, v) +- return Value{ +- vtype: STRINGSLICE, +- slice: &cp, +- } ++ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} + } + + // Type returns a type of the Value. +@@ -159,10 +142,14 @@ func (v Value) AsBool() bool { + // AsBoolSlice returns the []bool value. Make sure that the Value's type is + // BOOLSLICE. + func (v Value) AsBoolSlice() []bool { +- if s, ok := v.slice.(*[]bool); ok { +- return *s ++ if v.vtype != BOOLSLICE { ++ return nil + } +- return nil ++ return v.asBoolSlice() ++} ++ ++func (v Value) asBoolSlice() []bool { ++ return attribute.AsBoolSlice(v.slice) + } + + // AsInt64 returns the int64 value. Make sure that the Value's type is +@@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 { + // AsInt64Slice returns the []int64 value. Make sure that the Value's type is + // INT64SLICE. + func (v Value) AsInt64Slice() []int64 { +- if s, ok := v.slice.(*[]int64); ok { +- return *s ++ if v.vtype != INT64SLICE { ++ return nil + } +- return nil ++ return v.asInt64Slice() ++} ++ ++func (v Value) asInt64Slice() []int64 { ++ return attribute.AsInt64Slice(v.slice) + } + + // AsFloat64 returns the float64 value. 
Make sure that the Value's +@@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 { + // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is + // FLOAT64SLICE. + func (v Value) AsFloat64Slice() []float64 { +- if s, ok := v.slice.(*[]float64); ok { +- return *s ++ if v.vtype != FLOAT64SLICE { ++ return nil + } +- return nil ++ return v.asFloat64Slice() ++} ++ ++func (v Value) asFloat64Slice() []float64 { ++ return attribute.AsFloat64Slice(v.slice) + } + + // AsString returns the string value. Make sure that the Value's type +@@ -204,10 +199,14 @@ func (v Value) AsString() string { + // AsStringSlice returns the []string value. Make sure that the Value's type is + // STRINGSLICE. + func (v Value) AsStringSlice() []string { +- if s, ok := v.slice.(*[]string); ok { +- return *s ++ if v.vtype != STRINGSLICE { ++ return nil + } +- return nil ++ return v.asStringSlice() ++} ++ ++func (v Value) asStringSlice() []string { ++ return attribute.AsStringSlice(v.slice) + } + + type unknownValueType struct{} +@@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} { + case BOOL: + return v.AsBool() + case BOOLSLICE: +- return v.AsBoolSlice() ++ return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: +- return v.AsInt64Slice() ++ return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: +- return v.AsFloat64Slice() ++ return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: +- return v.AsStringSlice() ++ return v.asStringSlice() + } + return unknownValueType{} + } +@@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} { + func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: +- return fmt.Sprint(*(v.slice.(*[]bool))) ++ return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: +- return fmt.Sprint(*(v.slice.(*[]int64))) ++ return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: +- return fmt.Sprint(*(v.slice.(*[]float64))) ++ return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: +- return fmt.Sprint(*(v.slice.(*[]string))) ++ return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: +diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go +index eba180e04f8..84532cb1da3 100644 +--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go ++++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go +@@ -61,11 +61,6 @@ type Property struct { + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +- +- // hasData indicates whether the created property contains data or not. +- // Properties that do not contain data are invalid with no other check +- // required. +- hasData bool + } + + // NewKeyProperty returns a new Property for key. 
+@@ -76,7 +71,7 @@ func NewKeyProperty(key string) (Property, error) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + +- p := Property{key: key, hasData: true} ++ p := Property{key: key} + return p, nil + } + +@@ -95,7 +90,6 @@ func NewKeyValueProperty(key, value string) (Property, error) { + key: key, + value: value, + hasValue: true, +- hasData: true, + } + return p, nil + } +@@ -117,7 +111,7 @@ func parseProperty(property string) (Property, error) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + +- p := Property{hasData: true} ++ var p Property + if match[1] != "" { + p.key = match[1] + } else { +@@ -136,10 +130,6 @@ func (p Property) validate() error { + return fmt.Errorf("invalid property: %w", err) + } + +- if !p.hasData { +- return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) +- } +- + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } +@@ -250,8 +240,9 @@ type Member struct { + hasData bool + } + +-// NewMember returns a new Member from the passed arguments. An error is +-// returned if the created Member would be invalid according to the W3C ++// NewMember returns a new Member from the passed arguments. The key will be ++// used directly while the value will be url decoded after validation. An error ++// is returned if the created Member would be invalid according to the W3C + // Baggage specification. + func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{ +@@ -263,7 +254,11 @@ func NewMember(key, value string, props ...Property) (Member, error) { + if err := m.validate(); err != nil { + return newInvalidMember(), err + } +- ++ decodedValue, err := url.PathUnescape(value) ++ if err != nil { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) ++ } ++ m.value = decodedValue + return m, nil + } + +@@ -284,52 +279,45 @@ func parseMember(member string) (Member, error) { + props properties + ) + +- parts := strings.SplitN(member, propertyDelimiter, 2) +- switch len(parts) { +- case 2: ++ keyValue, properties, found := strings.Cut(member, propertyDelimiter) ++ if found { + // Parse the member properties. +- for _, pStr := range strings.Split(parts[1], propertyDelimiter) { ++ for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } +- fallthrough +- case 1: +- // Parse the member key/value pair. +- +- // Take into account a value can contain equal signs (=). +- kv := strings.SplitN(parts[0], keyValueDelimiter, 2) +- if len(kv) != 2 { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) +- } +- // "Leading and trailing whitespaces are allowed but MUST be trimmed +- // when converting the header into a data structure." +- key = strings.TrimSpace(kv[0]) +- var err error +- value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) +- if err != nil { +- return newInvalidMember(), fmt.Errorf("%w: %q", err, value) +- } +- if !keyRe.MatchString(key) { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) +- } +- if !valueRe.MatchString(value) { +- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) +- } +- default: +- // This should never happen unless a developer has changed the string +- // splitting somehow. Panic instead of failing silently and allowing +- // the bug to slip past the CI checks. 
+- panic("failed to parse baggage member") ++ } ++ // Parse the member key/value pair. ++ ++ // Take into account a value can contain equal signs (=). ++ k, v, found := strings.Cut(keyValue, keyValueDelimiter) ++ if !found { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) ++ } ++ // "Leading and trailing whitespaces are allowed but MUST be trimmed ++ // when converting the header into a data structure." ++ key = strings.TrimSpace(k) ++ var err error ++ value, err = url.PathUnescape(strings.TrimSpace(v)) ++ if err != nil { ++ return newInvalidMember(), fmt.Errorf("%w: %q", err, value) ++ } ++ if !keyRe.MatchString(key) { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) ++ } ++ if !valueRe.MatchString(value) { ++ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil + } + +-// validate ensures m conforms to the W3C Baggage specification, returning an +-// error otherwise. ++// validate ensures m conforms to the W3C Baggage specification. ++// A key is just an ASCII string, but a value must be URL encoded UTF-8, ++// returning an error otherwise. + func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) +@@ -465,6 +453,7 @@ func (b Baggage) Member(key string) Member { + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), ++ hasData: true, + } + } + +@@ -484,6 +473,7 @@ func (b Baggage) Members() []Member { + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), ++ hasData: true, + }) + } + return members +diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go +index 064a9279fd1..587ebae4e30 100644 +--- a/vendor/go.opentelemetry.io/otel/codes/codes.go ++++ b/vendor/go.opentelemetry.io/otel/codes/codes.go +@@ -23,10 +23,20 @@ import ( + const ( + // Unset is the default status code. + Unset Code = 0 ++ + // Error indicates the operation contains an error. ++ // ++ // NOTE: The error code in OTLP is 2. ++ // The value of this enum is only relevant to the internals ++ // of the Go SDK. + Error Code = 1 ++ + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. ++ // ++ // NOTE: The Ok code in OTLP is 1. ++ // The value of this enum is only relevant to the internals ++ // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go +index df3e0f1b621..4e328fbb4b3 100644 +--- a/vendor/go.opentelemetry.io/otel/codes/doc.go ++++ b/vendor/go.opentelemetry.io/otel/codes/doc.go +@@ -16,6 +16,6 @@ + Package codes defines the canonical error codes used by OpenTelemetry. + + It conforms to [the OpenTelemetry +-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). ++specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). 
+ */ + package codes // import "go.opentelemetry.io/otel/codes" +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +index ca91fd4f489..50295223182 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +@@ -2,7 +2,7 @@ + + [![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) + +-[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation. ++[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md) implementation. + + ## Installation + +@@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace + + ## Examples + +-- [Exporter setup and examples](./otlptracehttp/example_test.go) +-- [Full example sending telemetry to a local collector](../../../example/otel-collector) ++- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) ++- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) + + ## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) + +@@ -36,7 +36,7 @@ The `otlptracehttp` package implements a client for the span exporter that sends + The following environment variables can be used (instead of options objects) to + override the default configuration. For more information about how each of + these environment variables is interpreted, see [the OpenTelemetry +-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). ++specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md). + + | Environment variable | Option | Default value | + | ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- | +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +index c5ee6c098cc..0dbe15555b3 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +@@ -17,6 +17,7 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + import ( + "context" + "errors" ++ "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" +@@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) + return nil + } + +- return e.client.UploadTraces(ctx, protoSpans) ++ err := e.client.UploadTraces(ctx, protoSpans) ++ if err != nil { ++ return fmt.Errorf("traces export: %w", err) ++ } ++ return nil + } + + // Start establishes a connection to the receiving endpoint. 
+diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +index 9d6e1898b14..86fb61a0dec 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +@@ -27,10 +27,10 @@ import ( + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/exporters/otlp/internal" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + ) +@@ -130,13 +130,16 @@ var errAlreadyStopped = errors.New("the client is already stopped") + // If the client has already stopped, an error will be returned describing + // this. + func (c *client) Stop(ctx context.Context) error { ++ // Make sure to return context error if the context is done when calling this method. ++ err := ctx.Err() ++ + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() +- var err error ++ + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force +@@ -202,11 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc + ResourceSpans: protoSpans, + }) + if resp != nil && resp.PartialSuccess != nil { +- otel.Handle(internal.PartialSuccessToError( +- internal.TracingPartialSuccess, +- resp.PartialSuccess.RejectedSpans, +- resp.PartialSuccess.ErrorMessage, +- )) ++ msg := resp.PartialSuccess.GetErrorMessage() ++ n := resp.PartialSuccess.GetRejectedSpans() ++ if n != 0 || msg != "" { ++ err := internal.TracePartialSuccessError(n, msg) ++ otel.Handle(err) ++ } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { +@@ -255,7 +259,6 @@ func (c *client) exportContext(parent context.Context) (context.Context, context + // retryable returns if err identifies a request that can be retried and a + // duration to wait for if an explicit throttle time is included in err. + func retryable(err error) (bool, time.Duration) { +- //func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + switch s.Code() { + case codes.Canceled, +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +similarity index 57% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +index 67003c4a2fa..becb1f0fbbe 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/envconfig/envconfig.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ++package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + + import ( + "crypto/tls" +@@ -23,6 +26,8 @@ import ( + "strconv" + "strings" + "time" ++ ++ "go.opentelemetry.io/otel/internal/global" + ) + + // ConfigFn is the generic function used to set a config. +@@ -59,13 +64,26 @@ func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + } + } + ++// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. ++func WithBool(n string, fn func(bool)) ConfigFn { ++ return func(e *EnvOptionsReader) { ++ if v, ok := e.GetEnvValue(n); ok { ++ b := strings.ToLower(v) == "true" ++ fn(b) ++ } ++ } ++} ++ + // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. + func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if d, err := strconv.Atoi(v); err == nil { +- fn(time.Duration(d) * time.Millisecond) ++ d, err := strconv.Atoi(v) ++ if err != nil { ++ global.Error(err, "parse duration", "input", v) ++ return + } ++ fn(time.Duration(d) * time.Millisecond) + } + } + } +@@ -83,26 +101,62 @@ func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) + func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if u, err := url.Parse(v); err == nil { +- fn(u) ++ u, err := url.Parse(v) ++ if err != nil { ++ global.Error(err, "parse url", "input", v) ++ return + } ++ fn(u) + } + } + } + +-// WithTLSConfig retrieves the specified config and passes it to ConfigFn as a crypto/tls.Config. +-func WithTLSConfig(n string, fn func(*tls.Config)) func(e *EnvOptionsReader) { ++// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. ++func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { +- if b, err := e.ReadFile(v); err == nil { +- if c, err := createTLSConfig(b); err == nil { +- fn(c) +- } ++ b, err := e.ReadFile(v) ++ if err != nil { ++ global.Error(err, "read tls ca cert file", "file", v) ++ return ++ } ++ c, err := createCertPool(b) ++ if err != nil { ++ global.Error(err, "create tls cert pool") ++ return + } ++ fn(c) + } + } + } + ++// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
++func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { ++ return func(e *EnvOptionsReader) { ++ vc, okc := e.GetEnvValue(nc) ++ vk, okk := e.GetEnvValue(nk) ++ if !okc || !okk { ++ return ++ } ++ cert, err := e.ReadFile(vc) ++ if err != nil { ++ global.Error(err, "read tls client cert", "file", vc) ++ return ++ } ++ key, err := e.ReadFile(vk) ++ if err != nil { ++ global.Error(err, "read tls client key", "file", vk) ++ return ++ } ++ crt, err := tls.X509KeyPair(cert, key) ++ if err != nil { ++ global.Error(err, "create tls client key pair") ++ return ++ } ++ fn(crt) ++ } ++} ++ + func keyWithNamespace(ns, key string) string { + if ns == "" { + return key +@@ -115,17 +169,20 @@ func stringToHeader(value string) map[string]string { + headers := make(map[string]string) + + for _, header := range headersPairs { +- nameValue := strings.SplitN(header, "=", 2) +- if len(nameValue) < 2 { ++ n, v, found := strings.Cut(header, "=") ++ if !found { ++ global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } +- name, err := url.QueryUnescape(nameValue[0]) ++ name, err := url.QueryUnescape(n) + if err != nil { ++ global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) +- value, err := url.QueryUnescape(nameValue[1]) ++ value, err := url.QueryUnescape(v) + if err != nil { ++ global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) +@@ -136,13 +193,10 @@ func stringToHeader(value string) map[string]string { + return headers + } + +-func createTLSConfig(certBytes []byte) (*tls.Config, error) { ++func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } +- +- return &tls.Config{ +- RootCAs: cp, +- }, nil ++ return cp, nil + } +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +new file mode 100644 +index 00000000000..1fb29061894 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +@@ -0,0 +1,35 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go ++ ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go ++//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +similarity index 74% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +index b29f618e3de..32f6dddb4f6 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,17 +15,18 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" ++ "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + +- "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + ) + + // DefaultEnvOptionsReader is the default environments reader. +@@ -53,6 +57,7 @@ func ApplyHTTPEnvConfigs(cfg Config) Config { + func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + ++ tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) +@@ -81,8 +86,13 @@ func getOptionsFromEnv() []GenericOption { + return cfg + }, withEndpointForGRPC(u))) + }), +- envconfig.WithTLSConfig("CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), +- envconfig.WithTLSConfig("TRACES_CERTIFICATE", func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), ++ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), ++ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), ++ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), ++ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), ++ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), ++ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), ++ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), +@@ -125,3 +135,19 @@ func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOpt + } + } + } ++ ++// revive:disable-next-line:flag-parameter ++func withInsecure(b bool) GenericOption { ++ if b { ++ return WithInsecure() ++ } ++ return WithSecure() ++} ++ ++func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { ++ return func(e *envconfig.EnvOptionsReader) { ++ if c.RootCAs != nil || len(c.Certificates) > 0 { ++ fn(c) ++ } ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +similarity index 89% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +index 56e83b85334..19b8434d4d2 100644 
+--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,11 +15,13 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" + "fmt" ++ "path" ++ "strings" + "time" + + "google.golang.org/grpc" +@@ -25,8 +30,8 @@ import ( + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + +- "go.opentelemetry.io/otel/exporters/otlp/internal" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + ) + + const ( +@@ -82,13 +87,28 @@ func NewHTTPConfig(opts ...HTTPOption) Config { + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } +- cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath) ++ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg + } + ++// cleanPath returns a path with all spaces trimmed and all redundancies ++// removed. If urlPath is empty or cleaning it results in an empty string, ++// defaultPath is returned instead. ++func cleanPath(urlPath string, defaultPath string) string { ++ tmp := path.Clean(strings.TrimSpace(urlPath)) ++ if tmp == "." { ++ return defaultPath ++ } ++ if !path.IsAbs(tmp) { ++ tmp = fmt.Sprintf("/%s", tmp) ++ } ++ return tmp ++} ++ + // NewGRPCConfig returns a new Config with all settings applied from opts and + // any unset setting using the default gRPC config values. + func NewGRPCConfig(opts ...GRPCOption) Config { ++ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), +@@ -97,6 +117,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, ++ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +similarity index 90% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +index c2d6c036152..d9dcdc96e7d 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. 
++// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +similarity index 87% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +index 7287cf6cfeb..19b6d4b21f9 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,7 +15,7 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + + import ( + "crypto/tls" +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +similarity index 64% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +index 7994706ab51..076905e54bf 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/partialsuccess.go ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -12,23 +15,10 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" ++package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + + import "fmt" + +-// PartialSuccessDropKind indicates the kind of partial success error +-// received by an OTLP exporter, which corresponds with the signal +-// being exported. +-type PartialSuccessDropKind string +- +-const ( +- // TracingPartialSuccess indicates that some spans were rejected. +- TracingPartialSuccess PartialSuccessDropKind = "spans" +- +- // MetricsPartialSuccess indicates that some metric data points were rejected. 
+- MetricsPartialSuccess PartialSuccessDropKind = "metric data points" +-) +- + // PartialSuccess represents the underlying error for all handling + // OTLP partial success messages. Use `errors.Is(err, + // PartialSuccess{})` to test whether an error passed to the OTel +@@ -36,7 +26,7 @@ const ( + type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 +- RejectedKind PartialSuccessDropKind ++ RejectedKind string + } + + var _ error = PartialSuccess{} +@@ -56,13 +46,22 @@ func (ps PartialSuccess) Is(err error) bool { + return ok + } + +-// PartialSuccessToError produces an error suitable for passing to +-// `otel.Handle()` out of the fields in a partial success response, +-// independent of which signal produced the outcome. +-func PartialSuccessToError(kind PartialSuccessDropKind, itemsRejected int64, errorMessage string) error { ++// TracePartialSuccessError returns an error describing a partial success ++// response for the trace signal. ++func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { ++ return PartialSuccess{ ++ ErrorMessage: errorMessage, ++ RejectedItems: itemsRejected, ++ RejectedKind: "spans", ++ } ++} ++ ++// MetricPartialSuccessError returns an error describing a partial success ++// response for the metric signal. ++func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, +- RejectedKind: kind, ++ RejectedKind: "metric data points", + } + } +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +similarity index 80% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +index 3d43f7aea97..3ce7d6632b8 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +@@ -1,3 +1,6 @@ ++// Code created by gotmpl. DO NOT MODIFY. ++// source: internal/shared/otlp/retry/retry.go.tmpl ++ + // Copyright The OpenTelemetry Authors + // + // Licensed under the Apache License, Version 2.0 (the "License"); +@@ -15,7 +18,7 @@ + // Package retry provides request retry functionality that can perform + // configurable exponential backoff for transient errors and honor any + // explicit throttle responses received. +-package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ++package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + + import ( + "context" +@@ -76,21 +79,21 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + } + } + +- // Do not use NewExponentialBackOff since it calls Reset and the code here +- // must call Reset after changing the InitialInterval (this saves an +- // unnecessary call to Now). 
+- b := &backoff.ExponentialBackOff{ +- InitialInterval: c.InitialInterval, +- RandomizationFactor: backoff.DefaultRandomizationFactor, +- Multiplier: backoff.DefaultMultiplier, +- MaxInterval: c.MaxInterval, +- MaxElapsedTime: c.MaxElapsedTime, +- Stop: backoff.Stop, +- Clock: backoff.SystemClock, +- } +- b.Reset() +- + return func(ctx context.Context, fn func(context.Context) error) error { ++ // Do not use NewExponentialBackOff since it calls Reset and the code here ++ // must call Reset after changing the InitialInterval (this saves an ++ // unnecessary call to Now). ++ b := &backoff.ExponentialBackOff{ ++ InitialInterval: c.InitialInterval, ++ RandomizationFactor: backoff.DefaultRandomizationFactor, ++ Multiplier: backoff.DefaultMultiplier, ++ MaxInterval: c.MaxInterval, ++ MaxElapsedTime: c.MaxElapsedTime, ++ Stop: backoff.Stop, ++ Clock: backoff.SystemClock, ++ } ++ b.Reset() ++ + for { + err := fn(ctx) + if err == nil { +@@ -119,8 +122,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + delay = throttle + } + +- if err := waitFunc(ctx, delay); err != nil { +- return err ++ if ctxErr := waitFunc(ctx, delay); ctxErr != nil { ++ return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +@@ -129,6 +132,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + // Allow override for testing. + var waitFunc = wait + ++// wait takes the caller's context, and the amount of time to wait. It will ++// return nil if the timer fires before or at the same time as the context's ++// deadline. This indicates that the call can be retried. + func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +index 3d09ce590d0..78ce9ad8f0b 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +@@ -22,8 +22,8 @@ import ( + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/exporters/otlp/internal/retry" +- "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" ++ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + ) + + // Option applies an option to the gRPC driver. +diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +similarity index 65% +rename from vendor/go.opentelemetry.io/otel/metric/unit/doc.go +rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +index f8e723593e6..10ac73ee3b8 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go ++++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +@@ -12,9 +12,9 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-// Package unit provides units. +-// +-// This package is currently in a pre-GA phase. Backwards incompatible changes +-// may be introduced in subsequent minor version releases as we work to track +-// the evolving OpenTelemetry specification and user feedback. 
+-package unit // import "go.opentelemetry.io/otel/metric/unit" ++package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ++ ++// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. ++func Version() string { ++ return "1.19.0" ++} +diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go +index 36cf09f7290..4115fe3bbb5 100644 +--- a/vendor/go.opentelemetry.io/otel/handler.go ++++ b/vendor/go.opentelemetry.io/otel/handler.go +@@ -15,59 +15,16 @@ + package otel // import "go.opentelemetry.io/otel" + + import ( +- "log" +- "os" +- "sync" ++ "go.opentelemetry.io/otel/internal/global" + ) + + var ( +- // globalErrorHandler provides an ErrorHandler that can be used +- // throughout an OpenTelemetry instrumented project. When a user +- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to +- // `Handle` and will be delegated to the registered ErrorHandler. +- globalErrorHandler = defaultErrorHandler() +- +- // Compile-time check that delegator implements ErrorHandler. +- _ ErrorHandler = (*delegator)(nil) +- // Compile-time check that errLogger implements ErrorHandler. +- _ ErrorHandler = (*errLogger)(nil) ++ // Compile-time check global.ErrDelegator implements ErrorHandler. ++ _ ErrorHandler = (*global.ErrDelegator)(nil) ++ // Compile-time check global.ErrLogger implements ErrorHandler. ++ _ ErrorHandler = (*global.ErrLogger)(nil) + ) + +-type delegator struct { +- lock *sync.RWMutex +- eh ErrorHandler +-} +- +-func (d *delegator) Handle(err error) { +- d.lock.RLock() +- defer d.lock.RUnlock() +- d.eh.Handle(err) +-} +- +-// setDelegate sets the ErrorHandler delegate. +-func (d *delegator) setDelegate(eh ErrorHandler) { +- d.lock.Lock() +- defer d.lock.Unlock() +- d.eh = eh +-} +- +-func defaultErrorHandler() *delegator { +- return &delegator{ +- lock: &sync.RWMutex{}, +- eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, +- } +-} +- +-// errLogger logs errors if no delegate is set, otherwise they are delegated. +-type errLogger struct { +- l *log.Logger +-} +- +-// Handle logs err if no delegate is set, otherwise it is delegated. +-func (h *errLogger) Handle(err error) { +- h.l.Print(err) +-} +- + // GetErrorHandler returns the global ErrorHandler instance. + // + // The default ErrorHandler instance returned will log all errors to STDERR +@@ -77,9 +34,7 @@ func (h *errLogger) Handle(err error) { + // + // Subsequent calls to SetErrorHandler after the first will not forward errors + // to the new ErrorHandler for prior returned instances. +-func GetErrorHandler() ErrorHandler { +- return globalErrorHandler +-} ++func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + + // SetErrorHandler sets the global ErrorHandler to h. + // +@@ -87,11 +42,7 @@ func GetErrorHandler() ErrorHandler { + // GetErrorHandler will send errors to h instead of the default logging + // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not + // delegate errors to h. +-func SetErrorHandler(h ErrorHandler) { +- globalErrorHandler.setDelegate(h) +-} ++func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + + // Handle is a convenience function for ErrorHandler().Handle(err). 
+-func Handle(err error) { +- GetErrorHandler().Handle(err) +-} ++func Handle(err error) { global.Handle(err) } +diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +new file mode 100644 +index 00000000000..622c3ee3f27 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +@@ -0,0 +1,111 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++/* ++Package attribute provide several helper functions for some commonly used ++logic of processing attributes. ++*/ ++package attribute // import "go.opentelemetry.io/otel/internal/attribute" ++ ++import ( ++ "reflect" ++) ++ ++// BoolSliceValue converts a bool slice into an array with same elements as slice. ++func BoolSliceValue(v []bool) interface{} { ++ var zero bool ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) ++ return cp.Elem().Interface() ++} ++ ++// Int64SliceValue converts an int64 slice into an array with same elements as slice. ++func Int64SliceValue(v []int64) interface{} { ++ var zero int64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) ++ return cp.Elem().Interface() ++} ++ ++// Float64SliceValue converts a float64 slice into an array with same elements as slice. ++func Float64SliceValue(v []float64) interface{} { ++ var zero float64 ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) ++ return cp.Elem().Interface() ++} ++ ++// StringSliceValue converts a string slice into an array with same elements as slice. ++func StringSliceValue(v []string) interface{} { ++ var zero string ++ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) ++ copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) ++ return cp.Elem().Interface() ++} ++ ++// AsBoolSlice converts a bool array into a slice into with same elements as array. ++func AsBoolSlice(v interface{}) []bool { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero bool ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]bool) ++} ++ ++// AsInt64Slice converts an int64 array into a slice into with same elements as array. 
++func AsInt64Slice(v interface{}) []int64 { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero int64 ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]int64) ++} ++ ++// AsFloat64Slice converts a float64 array into a slice into with same elements as array. ++func AsFloat64Slice(v interface{}) []float64 { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero float64 ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]float64) ++} ++ ++// AsStringSlice converts a string array into a slice into with same elements as array. ++func AsStringSlice(v interface{}) []string { ++ rv := reflect.ValueOf(v) ++ if rv.Type().Kind() != reflect.Array { ++ return nil ++ } ++ var zero string ++ correctLen := rv.Len() ++ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) ++ cpy := reflect.New(correctType) ++ _ = reflect.Copy(cpy.Elem(), rv) ++ return cpy.Elem().Slice(0, correctLen).Interface().([]string) ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go +new file mode 100644 +index 00000000000..f532f07e9e5 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/gen.go +@@ -0,0 +1,29 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package internal // import "go.opentelemetry.io/otel/internal" ++ ++//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go ++//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go ++//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go ++ ++//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go ++//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go ++//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go ++//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go ++//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go ++//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go +new file mode 100644 +index 00000000000..5e9b8304792 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go +@@ -0,0 +1,102 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "log" ++ "os" ++ "sync/atomic" ++) ++ ++var ( ++ // GlobalErrorHandler provides an ErrorHandler that can be used ++ // throughout an OpenTelemetry instrumented project. When a user ++ // specified ErrorHandler is registered (`SetErrorHandler`) all calls to ++ // `Handle` and will be delegated to the registered ErrorHandler. ++ GlobalErrorHandler = defaultErrorHandler() ++ ++ // Compile-time check that delegator implements ErrorHandler. ++ _ ErrorHandler = (*ErrDelegator)(nil) ++ // Compile-time check that errLogger implements ErrorHandler. ++ _ ErrorHandler = (*ErrLogger)(nil) ++) ++ ++// ErrorHandler handles irremediable events. ++type ErrorHandler interface { ++ // Handle handles any error deemed irremediable by an OpenTelemetry ++ // component. 
++ Handle(error) ++} ++ ++type ErrDelegator struct { ++ delegate atomic.Pointer[ErrorHandler] ++} ++ ++func (d *ErrDelegator) Handle(err error) { ++ d.getDelegate().Handle(err) ++} ++ ++func (d *ErrDelegator) getDelegate() ErrorHandler { ++ return *d.delegate.Load() ++} ++ ++// setDelegate sets the ErrorHandler delegate. ++func (d *ErrDelegator) setDelegate(eh ErrorHandler) { ++ d.delegate.Store(&eh) ++} ++ ++func defaultErrorHandler() *ErrDelegator { ++ d := &ErrDelegator{} ++ d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) ++ return d ++} ++ ++// ErrLogger logs errors if no delegate is set, otherwise they are delegated. ++type ErrLogger struct { ++ l *log.Logger ++} ++ ++// Handle logs err if no delegate is set, otherwise it is delegated. ++func (h *ErrLogger) Handle(err error) { ++ h.l.Print(err) ++} ++ ++// GetErrorHandler returns the global ErrorHandler instance. ++// ++// The default ErrorHandler instance returned will log all errors to STDERR ++// until an override ErrorHandler is set with SetErrorHandler. All ++// ErrorHandler returned prior to this will automatically forward errors to ++// the set instance instead of logging. ++// ++// Subsequent calls to SetErrorHandler after the first will not forward errors ++// to the new ErrorHandler for prior returned instances. ++func GetErrorHandler() ErrorHandler { ++ return GlobalErrorHandler ++} ++ ++// SetErrorHandler sets the global ErrorHandler to h. ++// ++// The first time this is called all ErrorHandler previously returned from ++// GetErrorHandler will send errors to h instead of the default logging ++// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not ++// delegate errors to h. ++func SetErrorHandler(h ErrorHandler) { ++ GlobalErrorHandler.setDelegate(h) ++} ++ ++// Handle is a convenience function for ErrorHandler().Handle(err). ++func Handle(err error) { ++ GetErrorHandler().Handle(err) ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +new file mode 100644 +index 00000000000..ebb13c20678 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +@@ -0,0 +1,371 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "context" ++ "sync/atomic" ++ ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// unwrapper unwraps to return the underlying instrument implementation. 
++type unwrapper interface { ++ Unwrap() metric.Observable ++} ++ ++type afCounter struct { ++ embedded.Float64ObservableCounter ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableCounterOption ++ ++ delegate atomic.Value // metric.Float64ObservableCounter ++} ++ ++var ( ++ _ unwrapper = (*afCounter)(nil) ++ _ metric.Float64ObservableCounter = (*afCounter)(nil) ++) ++ ++func (i *afCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableCounter) ++ } ++ return nil ++} ++ ++type afUpDownCounter struct { ++ embedded.Float64ObservableUpDownCounter ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableUpDownCounterOption ++ ++ delegate atomic.Value // metric.Float64ObservableUpDownCounter ++} ++ ++var ( ++ _ unwrapper = (*afUpDownCounter)(nil) ++ _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) ++) ++ ++func (i *afUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afUpDownCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableUpDownCounter) ++ } ++ return nil ++} ++ ++type afGauge struct { ++ embedded.Float64ObservableGauge ++ metric.Float64Observable ++ ++ name string ++ opts []metric.Float64ObservableGaugeOption ++ ++ delegate atomic.Value // metric.Float64ObservableGauge ++} ++ ++var ( ++ _ unwrapper = (*afGauge)(nil) ++ _ metric.Float64ObservableGauge = (*afGauge)(nil) ++) ++ ++func (i *afGauge) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64ObservableGauge(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *afGauge) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Float64ObservableGauge) ++ } ++ return nil ++} ++ ++type aiCounter struct { ++ embedded.Int64ObservableCounter ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableCounterOption ++ ++ delegate atomic.Value // metric.Int64ObservableCounter ++} ++ ++var ( ++ _ unwrapper = (*aiCounter)(nil) ++ _ metric.Int64ObservableCounter = (*aiCounter)(nil) ++) ++ ++func (i *aiCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableCounter) ++ } ++ return nil ++} ++ ++type aiUpDownCounter struct { ++ embedded.Int64ObservableUpDownCounter ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableUpDownCounterOption ++ ++ delegate atomic.Value // metric.Int64ObservableUpDownCounter ++} ++ ++var ( ++ _ unwrapper = (*aiUpDownCounter)(nil) ++ _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) ++) ++ ++func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) 
++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiUpDownCounter) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableUpDownCounter) ++ } ++ return nil ++} ++ ++type aiGauge struct { ++ embedded.Int64ObservableGauge ++ metric.Int64Observable ++ ++ name string ++ opts []metric.Int64ObservableGaugeOption ++ ++ delegate atomic.Value // metric.Int64ObservableGauge ++} ++ ++var ( ++ _ unwrapper = (*aiGauge)(nil) ++ _ metric.Int64ObservableGauge = (*aiGauge)(nil) ++) ++ ++func (i *aiGauge) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64ObservableGauge(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *aiGauge) Unwrap() metric.Observable { ++ if ctr := i.delegate.Load(); ctr != nil { ++ return ctr.(metric.Int64ObservableGauge) ++ } ++ return nil ++} ++ ++// Sync Instruments. ++type sfCounter struct { ++ embedded.Float64Counter ++ ++ name string ++ opts []metric.Float64CounterOption ++ ++ delegate atomic.Value // metric.Float64Counter ++} ++ ++var _ metric.Float64Counter = (*sfCounter)(nil) ++ ++func (i *sfCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64Counter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64Counter).Add(ctx, incr, opts...) ++ } ++} ++ ++type sfUpDownCounter struct { ++ embedded.Float64UpDownCounter ++ ++ name string ++ opts []metric.Float64UpDownCounterOption ++ ++ delegate atomic.Value // metric.Float64UpDownCounter ++} ++ ++var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) ++ ++func (i *sfUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64UpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) ++ } ++} ++ ++type sfHistogram struct { ++ embedded.Float64Histogram ++ ++ name string ++ opts []metric.Float64HistogramOption ++ ++ delegate atomic.Value // metric.Float64Histogram ++} ++ ++var _ metric.Float64Histogram = (*sfHistogram)(nil) ++ ++func (i *sfHistogram) setDelegate(m metric.Meter) { ++ ctr, err := m.Float64Histogram(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Float64Histogram).Record(ctx, x, opts...) ++ } ++} ++ ++type siCounter struct { ++ embedded.Int64Counter ++ ++ name string ++ opts []metric.Int64CounterOption ++ ++ delegate atomic.Value // metric.Int64Counter ++} ++ ++var _ metric.Int64Counter = (*siCounter)(nil) ++ ++func (i *siCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64Counter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64Counter).Add(ctx, x, opts...) 
++ } ++} ++ ++type siUpDownCounter struct { ++ embedded.Int64UpDownCounter ++ ++ name string ++ opts []metric.Int64UpDownCounterOption ++ ++ delegate atomic.Value // metric.Int64UpDownCounter ++} ++ ++var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) ++ ++func (i *siUpDownCounter) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64UpDownCounter(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) ++ } ++} ++ ++type siHistogram struct { ++ embedded.Int64Histogram ++ ++ name string ++ opts []metric.Int64HistogramOption ++ ++ delegate atomic.Value // metric.Int64Histogram ++} ++ ++var _ metric.Int64Histogram = (*siHistogram)(nil) ++ ++func (i *siHistogram) setDelegate(m metric.Meter) { ++ ctr, err := m.Int64Histogram(i.name, i.opts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ return ++ } ++ i.delegate.Store(ctr) ++} ++ ++func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { ++ if ctr := i.delegate.Load(); ctr != nil { ++ ctr.(metric.Int64Histogram).Record(ctx, x, opts...) ++ } ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +index ccb3258711a..c6f305a2b76 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +@@ -17,47 +17,53 @@ package global // import "go.opentelemetry.io/otel/internal/global" + import ( + "log" + "os" +- "sync" ++ "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + ) + +-// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. ++// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. + // + // The default logger uses stdr which is backed by the standard `log.Logger` + // interface. This logger will only show messages at the Error Level. +-var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) +-var globalLoggerLock = &sync.RWMutex{} ++var globalLogger atomic.Pointer[logr.Logger] ++ ++func init() { ++ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) ++} + + // SetLogger overrides the globalLogger with l. + // +-// To see Info messages use a logger with `l.V(1).Enabled() == true` +-// To see Debug messages use a logger with `l.V(5).Enabled() == true`. ++// To see Warn messages use a logger with `l.V(1).Enabled() == true` ++// To see Info messages use a logger with `l.V(4).Enabled() == true` ++// To see Debug messages use a logger with `l.V(8).Enabled() == true`. + func SetLogger(l logr.Logger) { +- globalLoggerLock.Lock() +- defer globalLoggerLock.Unlock() +- globalLogger = l ++ globalLogger.Store(&l) ++} ++ ++func getLogger() logr.Logger { ++ return *globalLogger.Load() + } + + // Info prints messages about the general state of the API or SDK. +-// This should usually be less then 5 messages a minute. ++// This should usually be less than 5 messages a minute. + func Info(msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.V(1).Info(msg, keysAndValues...) ++ getLogger().V(4).Info(msg, keysAndValues...) 
+ } + + // Error prints messages about exceptional states of the API or SDK. + func Error(err error, msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.Error(err, msg, keysAndValues...) ++ getLogger().Error(err, msg, keysAndValues...) + } + + // Debug prints messages about all internal changes in the API or SDK. + func Debug(msg string, keysAndValues ...interface{}) { +- globalLoggerLock.RLock() +- defer globalLoggerLock.RUnlock() +- globalLogger.V(5).Info(msg, keysAndValues...) ++ getLogger().V(8).Info(msg, keysAndValues...) ++} ++ ++// Warn prints messages about warnings in the API or SDK. ++// Not an error but is likely more important than an informational event. ++func Warn(msg string, keysAndValues ...interface{}) { ++ getLogger().V(1).Info(msg, keysAndValues...) + } +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go +new file mode 100644 +index 00000000000..0097db478c6 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go +@@ -0,0 +1,354 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package global // import "go.opentelemetry.io/otel/internal/global" ++ ++import ( ++ "container/list" ++ "sync" ++ "sync/atomic" ++ ++ "go.opentelemetry.io/otel/metric" ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// meterProvider is a placeholder for a configured SDK MeterProvider. ++// ++// All MeterProvider functionality is forwarded to a delegate once ++// configured. ++type meterProvider struct { ++ embedded.MeterProvider ++ ++ mtx sync.Mutex ++ meters map[il]*meter ++ ++ delegate metric.MeterProvider ++} ++ ++// setDelegate configures p to delegate all MeterProvider functionality to ++// provider. ++// ++// All Meters provided prior to this function call are switched out to be ++// Meters provided by provider. All instruments and callbacks are recreated and ++// delegated. ++// ++// It is guaranteed by the caller that this happens only once. ++func (p *meterProvider) setDelegate(provider metric.MeterProvider) { ++ p.mtx.Lock() ++ defer p.mtx.Unlock() ++ ++ p.delegate = provider ++ ++ if len(p.meters) == 0 { ++ return ++ } ++ ++ for _, meter := range p.meters { ++ meter.setDelegate(provider) ++ } ++ ++ p.meters = nil ++} ++ ++// Meter implements MeterProvider. ++func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { ++ p.mtx.Lock() ++ defer p.mtx.Unlock() ++ ++ if p.delegate != nil { ++ return p.delegate.Meter(name, opts...) ++ } ++ ++ // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. ++ ++ c := metric.NewMeterConfig(opts...) 
++ key := il{ ++ name: name, ++ version: c.InstrumentationVersion(), ++ } ++ ++ if p.meters == nil { ++ p.meters = make(map[il]*meter) ++ } ++ ++ if val, ok := p.meters[key]; ok { ++ return val ++ } ++ ++ t := &meter{name: name, opts: opts} ++ p.meters[key] = t ++ return t ++} ++ ++// meter is a placeholder for a metric.Meter. ++// ++// All Meter functionality is forwarded to a delegate once configured. ++// Otherwise, all functionality is forwarded to a NoopMeter. ++type meter struct { ++ embedded.Meter ++ ++ name string ++ opts []metric.MeterOption ++ ++ mtx sync.Mutex ++ instruments []delegatedInstrument ++ ++ registry list.List ++ ++ delegate atomic.Value // metric.Meter ++} ++ ++type delegatedInstrument interface { ++ setDelegate(metric.Meter) ++} ++ ++// setDelegate configures m to delegate all Meter functionality to Meters ++// created by provider. ++// ++// All subsequent calls to the Meter methods will be passed to the delegate. ++// ++// It is guaranteed by the caller that this happens only once. ++func (m *meter) setDelegate(provider metric.MeterProvider) { ++ meter := provider.Meter(m.name, m.opts...) ++ m.delegate.Store(meter) ++ ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ ++ for _, inst := range m.instruments { ++ inst.setDelegate(meter) ++ } ++ ++ for e := m.registry.Front(); e != nil; e = e.Next() { ++ r := e.Value.(*registration) ++ r.setDelegate(meter) ++ m.registry.Remove(e) ++ } ++ ++ m.instruments = nil ++ m.registry.Init() ++} ++ ++func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64Counter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64UpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64Histogram(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &siHistogram{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableUpDownCounter(name, options...) 
++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Int64ObservableGauge(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &aiGauge{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64Counter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64UpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64Histogram(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &sfHistogram{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableUpDownCounter(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afUpDownCounter{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ return del.Float64ObservableGauge(name, options...) ++ } ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ i := &afGauge{name: name, opts: options} ++ m.instruments = append(m.instruments, i) ++ return i, nil ++} ++ ++// RegisterCallback captures the function that will be called during Collect. ++func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { ++ if del, ok := m.delegate.Load().(metric.Meter); ok { ++ insts = unwrapInstruments(insts) ++ return del.RegisterCallback(f, insts...) 
++ } ++ ++ m.mtx.Lock() ++ defer m.mtx.Unlock() ++ ++ reg := ®istration{instruments: insts, function: f} ++ e := m.registry.PushBack(reg) ++ reg.unreg = func() error { ++ m.mtx.Lock() ++ _ = m.registry.Remove(e) ++ m.mtx.Unlock() ++ return nil ++ } ++ return reg, nil ++} ++ ++type wrapped interface { ++ unwrap() metric.Observable ++} ++ ++func unwrapInstruments(instruments []metric.Observable) []metric.Observable { ++ out := make([]metric.Observable, 0, len(instruments)) ++ ++ for _, inst := range instruments { ++ if in, ok := inst.(wrapped); ok { ++ out = append(out, in.unwrap()) ++ } else { ++ out = append(out, inst) ++ } ++ } ++ ++ return out ++} ++ ++type registration struct { ++ embedded.Registration ++ ++ instruments []metric.Observable ++ function metric.Callback ++ ++ unreg func() error ++ unregMu sync.Mutex ++} ++ ++func (c *registration) setDelegate(m metric.Meter) { ++ insts := unwrapInstruments(c.instruments) ++ ++ c.unregMu.Lock() ++ defer c.unregMu.Unlock() ++ ++ if c.unreg == nil { ++ // Unregister already called. ++ return ++ } ++ ++ reg, err := m.RegisterCallback(c.function, insts...) ++ if err != nil { ++ GetErrorHandler().Handle(err) ++ } ++ ++ c.unreg = reg.Unregister ++} ++ ++func (c *registration) Unregister() error { ++ c.unregMu.Lock() ++ defer c.unregMu.Unlock() ++ if c.unreg == nil { ++ // Unregister already called. ++ return nil ++ } ++ ++ var err error ++ err, c.unreg = c.unreg(), nil ++ return err ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go +index 1ad38f828ec..7985005bcb6 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go +@@ -19,6 +19,7 @@ import ( + "sync" + "sync/atomic" + ++ "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + ) +@@ -31,14 +32,20 @@ type ( + propagatorsHolder struct { + tm propagation.TextMapPropagator + } ++ ++ meterProviderHolder struct { ++ mp metric.MeterProvider ++ } + ) + + var ( +- globalTracer = defaultTracerValue() +- globalPropagators = defaultPropagatorsValue() ++ globalTracer = defaultTracerValue() ++ globalPropagators = defaultPropagatorsValue() ++ globalMeterProvider = defaultMeterProvider() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once ++ delegateMeterOnce sync.Once + ) + + // TracerProvider is the internal implementation for global.TracerProvider. +@@ -102,6 +109,34 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) { + globalPropagators.Store(propagatorsHolder{tm: p}) + } + ++// MeterProvider is the internal implementation for global.MeterProvider. ++func MeterProvider() metric.MeterProvider { ++ return globalMeterProvider.Load().(meterProviderHolder).mp ++} ++ ++// SetMeterProvider is the internal implementation for global.SetMeterProvider. ++func SetMeterProvider(mp metric.MeterProvider) { ++ current := MeterProvider() ++ if _, cOk := current.(*meterProvider); cOk { ++ if _, mpOk := mp.(*meterProvider); mpOk && current == mp { ++ // Do not assign the default delegating MeterProvider to delegate ++ // to itself. ++ Error( ++ errors.New("no delegate configured in meter provider"), ++ "Setting meter provider to it's current value. 
No delegate will be configured", ++ ) ++ return ++ } ++ } ++ ++ delegateMeterOnce.Do(func() { ++ if def, ok := current.(*meterProvider); ok { ++ def.setDelegate(mp) ++ } ++ }) ++ globalMeterProvider.Store(meterProviderHolder{mp: mp}) ++} ++ + func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) +@@ -113,3 +148,9 @@ func defaultPropagatorsValue() *atomic.Value { + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v + } ++ ++func defaultMeterProvider() *atomic.Value { ++ v := &atomic.Value{} ++ v.Store(meterProviderHolder{mp: &meterProvider{}}) ++ return v ++} +diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go +index 5f008d0982b..3f61ec12a34 100644 +--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go ++++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go +@@ -39,6 +39,7 @@ import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // tracerProvider is a placeholder for a configured SDK TracerProvider. +@@ -46,6 +47,8 @@ import ( + // All TracerProvider functionality is forwarded to a delegate once + // configured. + type tracerProvider struct { ++ embedded.TracerProvider ++ + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +@@ -119,6 +122,8 @@ type il struct { + // All Tracer functionality is forwarded to a delegate once configured. + // Otherwise, all functionality is forwarded to a NoopTracer. + type tracer struct { ++ embedded.Tracer ++ + name string + opts []trace.TracerOption + provider *tracerProvider +@@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart + // SpanContext. It performs no operations other than to return the wrapped + // SpanContext. + type nonRecordingSpan struct { ++ embedded.Span ++ + sc trace.SpanContext + tracer *tracer + } +diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go +new file mode 100644 +index 00000000000..f955171951f +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric.go +@@ -0,0 +1,53 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package otel // import "go.opentelemetry.io/otel" ++ ++import ( ++ "go.opentelemetry.io/otel/internal/global" ++ "go.opentelemetry.io/otel/metric" ++) ++ ++// Meter returns a Meter from the global MeterProvider. The name must be the ++// name of the library providing instrumentation. This name may be the same as ++// the instrumented code only if that code provides built-in instrumentation. ++// If the name is empty, then a implementation defined default name will be ++// used instead. ++// ++// If this is called before a global MeterProvider is registered the returned ++// Meter will be a No-op implementation of a Meter. 
When a global MeterProvider ++// is registered for the first time, the returned Meter, and all the ++// instruments it has created or will create, are recreated automatically from ++// the new MeterProvider. ++// ++// This is short for GetMeterProvider().Meter(name). ++func Meter(name string, opts ...metric.MeterOption) metric.Meter { ++ return GetMeterProvider().Meter(name, opts...) ++} ++ ++// GetMeterProvider returns the registered global meter provider. ++// ++// If no global GetMeterProvider has been registered, a No-op GetMeterProvider ++// implementation is returned. When a global GetMeterProvider is registered for ++// the first time, the returned GetMeterProvider, and all the Meters it has ++// created or will create, are recreated automatically from the new ++// GetMeterProvider. ++func GetMeterProvider() metric.MeterProvider { ++ return global.MeterProvider() ++} ++ ++// SetMeterProvider registers mp as the global MeterProvider. ++func SetMeterProvider(mp metric.MeterProvider) { ++ global.SetMeterProvider(mp) ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +new file mode 100644 +index 00000000000..072baa8e8d0 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +@@ -0,0 +1,271 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Float64Observable describes a set of instruments used asynchronously to ++// record float64 measurements once per collection cycle. Observations of ++// these instruments are only made within a callback. ++// ++// Warning: Methods may be added to this interface in minor releases. ++type Float64Observable interface { ++ Observable ++ ++ float64Observable() ++} ++ ++// Float64ObservableCounter is an instrument used to asynchronously record ++// increasing float64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. The value observed is ++// assumed the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for ++// unimplemented methods. ++type Float64ObservableCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableCounter ++ ++ Float64Observable ++} ++ ++// Float64ObservableCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. 
++type Float64ObservableCounterConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableCounterConfig returns a new ++// [Float64ObservableCounterConfig] with all opts applied. ++func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { ++ var config Float64ObservableCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableCounterOption applies options to a ++// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableCounterOption. ++type Float64ObservableCounterOption interface { ++ applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig ++} ++ ++// Float64ObservableUpDownCounter is an instrument used to asynchronously ++// record float64 measurements once per collection cycle. Observations are only ++// made within a callback for this instrument. The value observed is assumed ++// the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64ObservableUpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableUpDownCounter ++ ++ Float64Observable ++} ++ ++// Float64ObservableUpDownCounterConfig contains options for asynchronous ++// counter instruments that record int64 values. ++type Float64ObservableUpDownCounterConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableUpDownCounterConfig returns a new ++// [Float64ObservableUpDownCounterConfig] with all opts applied. ++func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { ++ var config Float64ObservableUpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableUpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableUpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableUpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableUpDownCounterOption applies options to a ++// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableUpDownCounterOption. 
++type Float64ObservableUpDownCounterOption interface { ++ applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig ++} ++ ++// Float64ObservableGauge is an instrument used to asynchronously record ++// instantaneous float64 measurements once per collection cycle. Observations ++// are only made within a callback for this instrument. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64ObservableGauge interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64ObservableGauge ++ ++ Float64Observable ++} ++ ++// Float64ObservableGaugeConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Float64ObservableGaugeConfig struct { ++ description string ++ unit string ++ callbacks []Float64Callback ++} ++ ++// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] ++// with all opts applied. ++func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { ++ var config Float64ObservableGaugeConfig ++ for _, o := range opts { ++ config = o.applyFloat64ObservableGauge(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64ObservableGaugeConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64ObservableGaugeConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { ++ return c.callbacks ++} ++ ++// Float64ObservableGaugeOption applies options to a ++// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and ++// [InstrumentOption] for other options that can be used as a ++// Float64ObservableGaugeOption. ++type Float64ObservableGaugeOption interface { ++ applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig ++} ++ ++// Float64Observer is a recorder of float64 measurements. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Observer ++ ++ // Observe records the float64 value. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Observe(value float64, options ...ObserveOption) ++} ++ ++// Float64Callback is a function registered with a Meter that makes ++// observations for a Float64Observerable instrument it is registered with. ++// Calls to the Float64Observer record measurement values for the ++// Float64Observable. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. 
++// ++// The function needs to make unique observations across all registered ++// Float64Callbacks. Meaning, it should not report measurements with the same ++// attributes as another Float64Callbacks also registered for the same ++// instrument. ++// ++// The function needs to be concurrent safe. ++type Float64Callback func(context.Context, Float64Observer) error ++ ++// Float64ObservableOption applies options to float64 Observer instruments. ++type Float64ObservableOption interface { ++ Float64ObservableCounterOption ++ Float64ObservableUpDownCounterOption ++ Float64ObservableGaugeOption ++} ++ ++type float64CallbackOpt struct { ++ cback Float64Callback ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++// WithFloat64Callback adds callback to be called for an instrument. ++func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { ++ return float64CallbackOpt{callback} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +new file mode 100644 +index 00000000000..9bd6ebf0205 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +@@ -0,0 +1,269 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Int64Observable describes a set of instruments used asynchronously to record ++// int64 measurements once per collection cycle. Observations of these ++// instruments are only made within a callback. ++// ++// Warning: Methods may be added to this interface in minor releases. ++type Int64Observable interface { ++ Observable ++ ++ int64Observable() ++} ++ ++// Int64ObservableCounter is an instrument used to asynchronously record ++// increasing int64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. The value observed is ++// assumed the to be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. 
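For the asynchronous Float64 instruments and the WithFloat64Callback option defined just above, a hypothetical sketch of registering an observable gauge at construction time. The meter name, metric name, and heap-based measurement are illustrative only, and it assumes the Float64ObservableGauge constructor on metric.Meter from this API version:

    package main

    import (
        "context"
        "log"
        "runtime"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example.com/hypothetical/runtime") // illustrative name

        // The callback is run once per collection cycle by whichever SDK
        // MeterProvider ends up registered globally. With only the no-op
        // global provider in place it is never invoked.
        _, err := meter.Float64ObservableGauge(
            "process.heap.utilization",
            metric.WithDescription("Approximate fraction of the heap in use."),
            metric.WithUnit("1"),
            metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
                var ms runtime.MemStats
                runtime.ReadMemStats(&ms)
                // Crude ratio purely for illustration; +1 avoids division by zero.
                o.Observe(float64(ms.HeapInuse) / float64(ms.HeapSys+1))
                return nil
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
    }

The instrument itself can be discarded here because the option-registered callback alone carries the observations for it.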
See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableCounter ++ ++ Int64Observable ++} ++ ++// Int64ObservableCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableCounterConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] ++// with all opts applied. ++func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { ++ var config Int64ObservableCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableCounterOption applies options to a ++// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableCounterOption. ++type Int64ObservableCounterOption interface { ++ applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig ++} ++ ++// Int64ObservableUpDownCounter is an instrument used to asynchronously record ++// int64 measurements once per collection cycle. Observations are only made ++// within a callback for this instrument. The value observed is assumed the to ++// be the cumulative sum of the count. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableUpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableUpDownCounter ++ ++ Int64Observable ++} ++ ++// Int64ObservableUpDownCounterConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableUpDownCounterConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableUpDownCounterConfig returns a new ++// [Int64ObservableUpDownCounterConfig] with all opts applied. ++func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { ++ var config Int64ObservableUpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableUpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableUpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableUpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. 
++func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableUpDownCounterOption applies options to a ++// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableUpDownCounterOption. ++type Int64ObservableUpDownCounterOption interface { ++ applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig ++} ++ ++// Int64ObservableGauge is an instrument used to asynchronously record ++// instantaneous int64 measurements once per collection cycle. Observations are ++// only made within a callback for this instrument. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64ObservableGauge interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64ObservableGauge ++ ++ Int64Observable ++} ++ ++// Int64ObservableGaugeConfig contains options for asynchronous counter ++// instruments that record int64 values. ++type Int64ObservableGaugeConfig struct { ++ description string ++ unit string ++ callbacks []Int64Callback ++} ++ ++// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] ++// with all opts applied. ++func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { ++ var config Int64ObservableGaugeConfig ++ for _, o := range opts { ++ config = o.applyInt64ObservableGauge(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64ObservableGaugeConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64ObservableGaugeConfig) Unit() string { ++ return c.unit ++} ++ ++// Callbacks returns the configured callbacks. ++func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { ++ return c.callbacks ++} ++ ++// Int64ObservableGaugeOption applies options to a ++// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and ++// [InstrumentOption] for other options that can be used as an ++// Int64ObservableGaugeOption. ++type Int64ObservableGaugeOption interface { ++ applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig ++} ++ ++// Int64Observer is a recorder of int64 measurements. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Observer ++ ++ // Observe records the int64 value. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Observe(value int64, options ...ObserveOption) ++} ++ ++// Int64Callback is a function registered with a Meter that makes observations ++// for an Int64Observerable instrument it is registered with. 
Calls to the ++// Int64Observer record measurement values for the Int64Observable. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. ++// ++// The function needs to make unique observations across all registered ++// Int64Callbacks. Meaning, it should not report measurements with the same ++// attributes as another Int64Callbacks also registered for the same ++// instrument. ++// ++// The function needs to be concurrent safe. ++type Int64Callback func(context.Context, Int64Observer) error ++ ++// Int64ObservableOption applies options to int64 Observer instruments. ++type Int64ObservableOption interface { ++ Int64ObservableCounterOption ++ Int64ObservableUpDownCounterOption ++ Int64ObservableGaugeOption ++} ++ ++type int64CallbackOpt struct { ++ cback Int64Callback ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ cfg.callbacks = append(cfg.callbacks, o.cback) ++ return cfg ++} ++ ++// WithInt64Callback adds callback to be called for an instrument. ++func WithInt64Callback(callback Int64Callback) Int64ObservableOption { ++ return int64CallbackOpt{callback} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go +index 621e4c5fcb8..778ad2d748b 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/config.go ++++ b/vendor/go.opentelemetry.io/otel/metric/config.go +@@ -14,17 +14,30 @@ + + package metric // import "go.opentelemetry.io/otel/metric" + ++import "go.opentelemetry.io/otel/attribute" ++ + // MeterConfig contains options for Meters. + type MeterConfig struct { + instrumentationVersion string + schemaURL string ++ attrs attribute.Set ++ ++ // Ensure forward compatibility by explicitly making this not comparable. ++ noCmp [0]func() //nolint: unused // This is indeed used. + } + +-// InstrumentationVersion is the version of the library providing instrumentation. ++// InstrumentationVersion returns the version of the library providing ++// instrumentation. + func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion + } + ++// InstrumentationAttributes returns the attributes associated with the library ++// providing instrumentation. ++func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { ++ return cfg.attrs ++} ++ + // SchemaURL is the schema_url of the library providing instrumentation. + func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +@@ -60,6 +73,16 @@ func WithInstrumentationVersion(version string) MeterOption { + }) + } + ++// WithInstrumentationAttributes sets the instrumentation attributes. ++// ++// The passed attributes will be de-duplicated. ++func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { ++ return meterOptionFunc(func(config MeterConfig) MeterConfig { ++ config.attrs = attribute.NewSet(attr...) ++ return config ++ }) ++} ++ + // WithSchemaURL sets the schema URL. 
+ func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { +diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go +index bd6f4343720..54716e13b35 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/doc.go ++++ b/vendor/go.opentelemetry.io/otel/metric/doc.go +@@ -13,11 +13,158 @@ + // limitations under the License. + + /* +-Package metric provides an implementation of the metrics part of the +-OpenTelemetry API. ++Package metric provides the OpenTelemetry API used to measure metrics about ++source code operation. + +-This package is currently in a pre-GA phase. Backwards incompatible changes +-may be introduced in subsequent minor version releases as we work to track the +-evolving OpenTelemetry specification and user feedback. ++This API is separate from its implementation so the instrumentation built from ++it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official ++OpenTelemetry implementation of this API. ++ ++All measurements made with this package are made via instruments. These ++instruments are created by a [Meter] which itself is created by a ++[MeterProvider]. Applications need to accept a [MeterProvider] implementation ++as a starting point when instrumenting. This can be done directly, or by using ++the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an ++appropriately named [Meter] from the accepted [MeterProvider], instrumentation ++can then be built from the [Meter]'s instruments. ++ ++# Instruments ++ ++Each instrument is designed to make measurements of a particular type. Broadly, ++all instruments fall into two overlapping logical categories: asynchronous or ++synchronous, and int64 or float64. ++ ++All synchronous instruments ([Int64Counter], [Int64UpDownCounter], ++[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and ++[Float64Histogram]) are used to measure the operation and performance of source ++code during the source code execution. These instruments only make measurements ++when the source code they instrument is run. ++ ++All asynchronous instruments ([Int64ObservableCounter], ++[Int64ObservableUpDownCounter], [Int64ObservableGauge], ++[Float64ObservableCounter], [Float64ObservableUpDownCounter], and ++[Float64ObservableGauge]) are used to measure metrics outside of the execution ++of source code. They are said to make "observations" via a callback function ++called once every measurement collection cycle. ++ ++Each instrument is also grouped by the value type it measures. Either int64 or ++float64. The value being measured will dictate which instrument in these ++categories to use. ++ ++Outside of these two broad categories, instruments are described by the ++function they are designed to serve. All Counters ([Int64Counter], ++[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are ++designed to measure values that never decrease in value, but instead only ++incrementally increase in value. UpDownCounters ([Int64UpDownCounter], ++[Float64UpDownCounter], [Int64ObservableUpDownCounter], and ++[Float64ObservableUpDownCounter]) on the other hand, are designed to measure ++values that can increase and decrease. When more information needs to be ++conveyed about all the synchronous measurements made during a collection cycle, ++a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. 
Finally, ++when just the most recent measurement needs to be conveyed about an ++asynchronous measurement, a Gauge ([Int64ObservableGauge] and ++[Float64ObservableGauge]) should be used. ++ ++See the [OpenTelemetry documentation] for more information about instruments ++and their intended use. ++ ++# Measurements ++ ++Measurements are made by recording values and information about the values with ++an instrument. How these measurements are recorded depends on the instrument. ++ ++Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], ++[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and ++[Float64Histogram]) are recorded using the instrument methods directly. All ++counter instruments have an Add method that is used to measure an increment ++value, and all histogram instruments have a Record method to measure a data ++point. ++ ++Asynchronous instruments ([Int64ObservableCounter], ++[Int64ObservableUpDownCounter], [Int64ObservableGauge], ++[Float64ObservableCounter], [Float64ObservableUpDownCounter], and ++[Float64ObservableGauge]) record measurements within a callback function. The ++callback is registered with the Meter which ensures the callback is called once ++per collection cycle. A callback can be registered two ways: during the ++instrument's creation using an option, or later using the RegisterCallback ++method of the [Meter] that created the instrument. ++ ++If the following criteria are met, an option ([WithInt64Callback] or ++[WithFloat64Callback]) can be used during the asynchronous instrument's ++creation to register a callback ([Int64Callback] or [Float64Callback], ++respectively): ++ ++ - The measurement process is known when the instrument is created ++ - Only that instrument will make a measurement within the callback ++ - The callback never needs to be unregistered ++ ++If the criteria are not met, use the RegisterCallback method of the [Meter] that ++created the instrument to register a [Callback]. ++ ++# API Implementations ++ ++This package does not conform to the standard Go versioning policy, all of its ++interfaces may have methods added to them without a package major version bump. ++This non-standard API evolution could surprise an uninformed implementation ++author. They could unknowingly build their implementation in a way that would ++result in a runtime panic for their users that update to the new API. ++ ++The API is designed to help inform an instrumentation author about this ++non-standard API evolution. It requires them to choose a default behavior for ++unimplemented interface methods. There are three behavior choices they can ++make: ++ ++ - Compilation failure ++ - Panic ++ - Default to another implementation ++ ++All interfaces in this API embed a corresponding interface from ++[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default ++behavior of their implementations to be a compilation failure, signaling to ++their users they need to update to the latest version of that implementation, ++they need to embed the corresponding interface from ++[go.opentelemetry.io/otel/metric/embedded] in their implementation. For ++example, ++ ++ import "go.opentelemetry.io/otel/metric/embedded" ++ ++ type MeterProvider struct { ++ embedded.MeterProvider ++ // ... ++ } ++ ++If an author wants the default behavior of their implementations to a panic, ++they need to embed the API interface directly. ++ ++ import "go.opentelemetry.io/otel/metric" ++ ++ type MeterProvider struct { ++ metric.MeterProvider ++ // ... 
++ } ++ ++This is not a recommended behavior as it could lead to publishing packages that ++contain runtime panics when users update other package that use newer versions ++of [go.opentelemetry.io/otel/metric]. ++ ++Finally, an author can embed another implementation in theirs. The embedded ++implementation will be used for methods not defined by the author. For example, ++an author who wants to default to silently dropping the call can use ++[go.opentelemetry.io/otel/metric/noop]: ++ ++ import "go.opentelemetry.io/otel/metric/noop" ++ ++ type MeterProvider struct { ++ noop.MeterProvider ++ // ... ++ } ++ ++It is strongly recommended that authors only embed ++[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. ++That implementation is the only one OpenTelemetry authors can guarantee will ++fully implement all the API interfaces when a user updates their API. ++ ++[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ ++[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider + */ + package metric // import "go.opentelemetry.io/otel/metric" +diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +new file mode 100644 +index 00000000000..ae0bdbd2e64 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +@@ -0,0 +1,234 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package embedded provides interfaces embedded within the [OpenTelemetry ++// metric API]. ++// ++// Implementers of the [OpenTelemetry metric API] can embed the relevant type ++// from this package into their implementation directly. Doing so will result ++// in a compilation error for users when the [OpenTelemetry metric API] is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++// ++// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric ++package embedded // import "go.opentelemetry.io/otel/metric/embedded" ++ ++// MeterProvider is embedded in ++// [go.opentelemetry.io/otel/metric.MeterProvider]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type MeterProvider interface{ meterProvider() } ++ ++// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Meter interface{ meter() } ++ ++// Float64Observer is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Observer] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Observer] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Observer interface{ float64Observer() } ++ ++// Int64Observer is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users ++// to experience a compilation error, signaling they need to update to your ++// latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Observer] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64Observer interface{ int64Observer() } ++ ++// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Observer] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Observer interface{ observer() } ++ ++// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Registration] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/metric.Registration] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Registration interface{ registration() } ++ ++// Float64Counter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Counter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Counter] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Counter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Counter interface{ float64Counter() } ++ ++// Float64Histogram is embedded in ++// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Float64Histogram interface{ float64Histogram() } ++ ++// Float64ObservableCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableCounter interface{ float64ObservableCounter() } ++ ++// Float64ObservableGauge is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableGauge interface{ float64ObservableGauge() } ++ ++// Float64ObservableUpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] ++// if you want users to experience a compilation error, signaling they need to ++// update to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } ++ ++// Float64UpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Float64UpDownCounter interface{ float64UpDownCounter() } ++ ++// Int64Counter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Counter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users ++// to experience a compilation error, signaling they need to update to your ++// latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Counter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). 
++type Int64Counter interface{ int64Counter() } ++ ++// Int64Histogram is embedded in ++// [go.opentelemetry.io/otel/metric.Int64Histogram]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64Histogram interface{ int64Histogram() } ++ ++// Int64ObservableCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Int64ObservableCounter interface{ int64ObservableCounter() } ++ ++// Int64ObservableGauge is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you ++// want users to experience a compilation error, signaling they need to update ++// to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Int64ObservableGauge interface{ int64ObservableGauge() } ++ ++// Int64ObservableUpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if ++// you want users to experience a compilation error, signaling they need to ++// update to your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } ++ ++// Int64UpDownCounter is embedded in ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want ++// users to experience a compilation error, signaling they need to update to ++// your latest implementation, when the ++// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Int64UpDownCounter interface{ int64UpDownCounter() } +diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go +deleted file mode 100644 +index 05a67c2e999..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/global/global.go ++++ /dev/null +@@ -1,42 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. 
+-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/global" +- +-import ( +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/internal/global" +-) +- +-// Meter returns a Meter from the global MeterProvider. The +-// instrumentationName must be the name of the library providing +-// instrumentation. This name may be the same as the instrumented code only if +-// that code provides built-in instrumentation. If the instrumentationName is +-// empty, then a implementation defined default name will be used instead. +-// +-// This is short for MeterProvider().Meter(name). +-func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { +- return MeterProvider().Meter(instrumentationName, opts...) +-} +- +-// MeterProvider returns the registered global trace provider. +-// If none is registered then a No-op MeterProvider is returned. +-func MeterProvider() metric.MeterProvider { +- return global.MeterProvider() +-} +- +-// SetMeterProvider registers `mp` as the global meter provider. +-func SetMeterProvider(mp metric.MeterProvider) { +- global.SetMeterProvider(mp) +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go +new file mode 100644 +index 00000000000..be89cd53341 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go +@@ -0,0 +1,357 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// Observable is used as a grouping mechanism for all instruments that are ++// updated within a Callback. ++type Observable interface { ++ observable() ++} ++ ++// InstrumentOption applies options to all instruments. ++type InstrumentOption interface { ++ Int64CounterOption ++ Int64UpDownCounterOption ++ Int64HistogramOption ++ Int64ObservableCounterOption ++ Int64ObservableUpDownCounterOption ++ Int64ObservableGaugeOption ++ ++ Float64CounterOption ++ Float64UpDownCounterOption ++ Float64HistogramOption ++ Float64ObservableCounterOption ++ Float64ObservableUpDownCounterOption ++ Float64ObservableGaugeOption ++} ++ ++// HistogramOption applies options to histogram instruments. 
++type HistogramOption interface { ++ Int64HistogramOption ++ Float64HistogramOption ++} ++ ++type descOpt string ++ ++func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ c.description = string(o) ++ return c ++} ++ ++func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ c.description = string(o) ++ return c ++} ++ ++// WithDescription sets the instrument description. 
++func WithDescription(desc string) InstrumentOption { return descOpt(desc) } ++ ++type unitOpt string ++ ++func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { ++ c.unit = string(o) ++ return c ++} ++ ++func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { ++ c.unit = string(o) ++ return c ++} ++ ++// WithUnit sets the instrument unit. ++// ++// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. ++func WithUnit(u string) InstrumentOption { return unitOpt(u) } ++ ++// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. ++// ++// This option is considered "advisory", and may be ignored by API implementations. ++func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } ++ ++type bucketOpt []float64 ++ ++func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { ++ c.explicitBucketBoundaries = o ++ return c ++} ++ ++func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { ++ c.explicitBucketBoundaries = o ++ return c ++} ++ ++// AddOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as an AddOption. ++type AddOption interface { ++ applyAdd(AddConfig) AddConfig ++} ++ ++// AddConfig contains options for an addition measurement. ++type AddConfig struct { ++ attrs attribute.Set ++} ++ ++// NewAddConfig returns a new [AddConfig] with all opts applied. ++func NewAddConfig(opts []AddOption) AddConfig { ++ config := AddConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyAdd(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c AddConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// RecordOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as a RecordOption. 
++type RecordOption interface { ++ applyRecord(RecordConfig) RecordConfig ++} ++ ++// RecordConfig contains options for a recorded measurement. ++type RecordConfig struct { ++ attrs attribute.Set ++} ++ ++// NewRecordConfig returns a new [RecordConfig] with all opts applied. ++func NewRecordConfig(opts []RecordOption) RecordConfig { ++ config := RecordConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyRecord(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c RecordConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// ObserveOption applies options to an addition measurement. See ++// [MeasurementOption] for other options that can be used as a ObserveOption. ++type ObserveOption interface { ++ applyObserve(ObserveConfig) ObserveConfig ++} ++ ++// ObserveConfig contains options for an observed measurement. ++type ObserveConfig struct { ++ attrs attribute.Set ++} ++ ++// NewObserveConfig returns a new [ObserveConfig] with all opts applied. ++func NewObserveConfig(opts []ObserveOption) ObserveConfig { ++ config := ObserveConfig{attrs: *attribute.EmptySet()} ++ for _, o := range opts { ++ config = o.applyObserve(config) ++ } ++ return config ++} ++ ++// Attributes returns the configured attribute set. ++func (c ObserveConfig) Attributes() attribute.Set { ++ return c.attrs ++} ++ ++// MeasurementOption applies options to all instrument measurement. ++type MeasurementOption interface { ++ AddOption ++ RecordOption ++ ObserveOption ++} ++ ++type attrOpt struct { ++ set attribute.Set ++} ++ ++// mergeSets returns the union of keys between a and b. Any duplicate keys will ++// use the value associated with b. ++func mergeSets(a, b attribute.Set) attribute.Set { ++ // NewMergeIterator uses the first value for any duplicates. ++ iter := attribute.NewMergeIterator(&b, &a) ++ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) ++ for iter.Next() { ++ merged = append(merged, iter.Attribute()) ++ } ++ return attribute.NewSet(merged...) ++} ++ ++func (o attrOpt) applyAdd(c AddConfig) AddConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { ++ switch { ++ case o.set.Len() == 0: ++ case c.attrs.Len() == 0: ++ c.attrs = o.set ++ default: ++ c.attrs = mergeSets(c.attrs, o.set) ++ } ++ return c ++} ++ ++// WithAttributeSet sets the attribute Set associated with a measurement is ++// made with. ++// ++// If multiple WithAttributeSet or WithAttributes options are passed the ++// attributes will be merged together in the order they are passed. Attributes ++// with duplicate keys will use the last value passed. ++func WithAttributeSet(attributes attribute.Set) MeasurementOption { ++ return attrOpt{set: attributes} ++} ++ ++// WithAttributes converts attributes into an attribute Set and sets the Set to ++// be associated with a measurement. 
This is shorthand for: ++// ++// cp := make([]attribute.KeyValue, len(attributes)) ++// copy(cp, attributes) ++// WithAttributes(attribute.NewSet(cp...)) ++// ++// [attribute.NewSet] may modify the passed attributes so this will make a copy ++// of attributes before creating a set in order to ensure this function is ++// concurrent safe. This makes this option function less optimized in ++// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be ++// preferred for performance sensitive code. ++// ++// See [WithAttributeSet] for information about how multiple WithAttributes are ++// merged. ++func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { ++ cp := make([]attribute.KeyValue, len(attributes)) ++ copy(cp, attributes) ++ return attrOpt{set: attribute.NewSet(cp...)} ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go +deleted file mode 100644 +index 370715f694c..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go ++++ /dev/null +@@ -1,70 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- +- // Gauge creates an instrument for recording the current value. +- Gauge(name string, opts ...instrument.Option) (Gauge, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. 
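Stepping back to the measurement options added in metric/instrument.go above (WithAttributeSet and WithAttributes), a hypothetical sketch of attaching attributes to synchronous measurements. The histogram name, attribute keys, and values are illustrative, and it assumes the Float64Histogram constructor on metric.Meter from this API version:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        ctx := context.Background()
        meter := otel.Meter("example.com/hypothetical/http") // illustrative name

        latency, err := meter.Float64Histogram("demo.request.duration", metric.WithUnit("ms"))
        if err != nil {
            log.Fatal(err)
        }

        // Pre-building the attribute.Set and passing it via WithAttributeSet
        // skips the per-call copy and sort that WithAttributes performs, which
        // is why the Set form is preferred on hot paths.
        attrs := attribute.NewSet(
            attribute.String("demo.method", "GET"),
            attribute.Int("demo.status", 200),
        )
        latency.Record(ctx, 12.7, metric.WithAttributeSet(attrs))

        // The convenience form copies the key-values into a new Set each call.
        latency.Record(ctx, 9.3, metric.WithAttributes(attribute.String("demo.method", "GET")))
    }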
+- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// Gauge is an instrument that records independent readings. +-type Gauge interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go +deleted file mode 100644 +index 41a561bc4a2..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go ++++ /dev/null +@@ -1,70 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- +- // Gauge creates an instrument for recording the current value. +- Gauge(name string, opts ...instrument.Option) (Gauge, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +- +-// Gauge is an instrument that records independent readings. +-type Gauge interface { +- // Observe records the state of the instrument. +- // +- // It is only valid to call this within a callback. 
If called outside of the +- // registered callback it should have no effect on the instrument, and an +- // error will be reported via the error handler. +- Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) +- +- instrument.Asynchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/config.go b/vendor/go.opentelemetry.io/otel/metric/instrument/config.go +deleted file mode 100644 +index 8778bce1619..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/config.go ++++ /dev/null +@@ -1,69 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package instrument // import "go.opentelemetry.io/otel/metric/instrument" +- +-import "go.opentelemetry.io/otel/metric/unit" +- +-// Config contains options for metric instrument descriptors. +-type Config struct { +- description string +- unit unit.Unit +-} +- +-// Description describes the instrument in human-readable terms. +-func (cfg Config) Description() string { +- return cfg.description +-} +- +-// Unit describes the measurement unit for an instrument. +-func (cfg Config) Unit() unit.Unit { +- return cfg.unit +-} +- +-// Option is an interface for applying metric instrument options. +-type Option interface { +- applyInstrument(Config) Config +-} +- +-// NewConfig creates a new Config and applies all the given options. +-func NewConfig(opts ...Option) Config { +- var config Config +- for _, o := range opts { +- config = o.applyInstrument(config) +- } +- return config +-} +- +-type optionFunc func(Config) Config +- +-func (fn optionFunc) applyInstrument(cfg Config) Config { +- return fn(cfg) +-} +- +-// WithDescription applies provided description. +-func WithDescription(desc string) Option { +- return optionFunc(func(cfg Config) Config { +- cfg.description = desc +- return cfg +- }) +-} +- +-// WithUnit applies provided unit. +-func WithUnit(u unit.Unit) Option { +- return optionFunc(func(cfg Config) Config { +- cfg.unit = u +- return cfg +- }) +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go +deleted file mode 100644 +index 435db1127bc..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go ++++ /dev/null +@@ -1,56 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. 
+- +-package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- // Histogram creates an instrument for recording a distribution of values. +- Histogram(name string, opts ...instrument.Option) (Histogram, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// Histogram is an instrument that records a distribution of values. +-type Histogram interface { +- // Record adds an additional value to the distribution. +- Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go +deleted file mode 100644 +index c77a4672860..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go ++++ /dev/null +@@ -1,56 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +-) +- +-// InstrumentProvider provides access to individual instruments. +-type InstrumentProvider interface { +- // Counter creates an instrument for recording increasing values. +- Counter(name string, opts ...instrument.Option) (Counter, error) +- // UpDownCounter creates an instrument for recording changes of a value. +- UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) +- // Histogram creates an instrument for recording a distribution of values. +- Histogram(name string, opts ...instrument.Option) (Histogram, error) +-} +- +-// Counter is an instrument that records increasing values. +-type Counter interface { +- // Add records a change to the counter. 
+- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// UpDownCounter is an instrument that records increasing or decreasing values. +-type UpDownCounter interface { +- // Add records a change to the counter. +- Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +- +-// Histogram is an instrument that records a distribution of values. +-type Histogram interface { +- // Record adds an additional value to the distribution. +- Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) +- +- instrument.Synchronous +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go +deleted file mode 100644 +index aed8b6660a5..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go ++++ /dev/null +@@ -1,360 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/internal/global" +- +-import ( +- "context" +- "sync/atomic" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-type afCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.Counter +- +- instrument.Asynchronous +-} +- +-func (i *afCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *afCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.Counter) +- } +- return nil +-} +- +-type afUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.UpDownCounter +- +- instrument.Asynchronous +-} +- +-func (i *afUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...) 
+- } +-} +- +-func (i *afUpDownCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.UpDownCounter) +- } +- return nil +-} +- +-type afGauge struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncfloat64.Gauge +- +- instrument.Asynchronous +-} +- +-func (i *afGauge) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *afGauge) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncfloat64.Gauge) +- } +- return nil +-} +- +-type aiCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.Counter +- +- instrument.Asynchronous +-} +- +-func (i *aiCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.Counter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.Counter) +- } +- return nil +-} +- +-type aiUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.UpDownCounter +- +- instrument.Asynchronous +-} +- +-func (i *aiUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiUpDownCounter) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.UpDownCounter) +- } +- return nil +-} +- +-type aiGauge struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //asyncint64.Gauge +- +- instrument.Asynchronous +-} +- +-func (i *aiGauge) setDelegate(m metric.Meter) { +- ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...) +- } +-} +- +-func (i *aiGauge) unwrap() instrument.Asynchronous { +- if ctr := i.delegate.Load(); ctr != nil { +- return ctr.(asyncint64.Gauge) +- } +- return nil +-} +- +-//Sync Instruments. +-type sfCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.Counter +- +- instrument.Synchronous +-} +- +-func (i *sfCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().Counter(i.name, i.opts...) 
+- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...) +- } +-} +- +-type sfUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.UpDownCounter +- +- instrument.Synchronous +-} +- +-func (i *sfUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...) +- } +-} +- +-type sfHistogram struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncfloat64.Histogram +- +- instrument.Synchronous +-} +- +-func (i *sfHistogram) setDelegate(m metric.Meter) { +- ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...) +- } +-} +- +-type siCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.Counter +- +- instrument.Synchronous +-} +- +-func (i *siCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().Counter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.Counter).Add(ctx, x, attrs...) +- } +-} +- +-type siUpDownCounter struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.UpDownCounter +- +- instrument.Synchronous +-} +- +-func (i *siUpDownCounter) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...) +- } +-} +- +-type siHistogram struct { +- name string +- opts []instrument.Option +- +- delegate atomic.Value //syncint64.Histogram +- +- instrument.Synchronous +-} +- +-func (i *siHistogram) setDelegate(m metric.Meter) { +- ctr, err := m.SyncInt64().Histogram(i.name, i.opts...) +- if err != nil { +- otel.Handle(err) +- return +- } +- i.delegate.Store(ctr) +-} +- +-func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { +- if ctr := i.delegate.Load(); ctr != nil { +- ctr.(syncint64.Histogram).Record(ctx, x, attrs...) 
+- } +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go +deleted file mode 100644 +index 0fa924f397c..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go ++++ /dev/null +@@ -1,347 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/internal/global" +- +-import ( +- "context" +- "sync" +- "sync/atomic" +- +- "go.opentelemetry.io/otel" +- "go.opentelemetry.io/otel/metric" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-// meterProvider is a placeholder for a configured SDK MeterProvider. +-// +-// All MeterProvider functionality is forwarded to a delegate once +-// configured. +-type meterProvider struct { +- mtx sync.Mutex +- meters map[il]*meter +- +- delegate metric.MeterProvider +-} +- +-type il struct { +- name string +- version string +-} +- +-// setDelegate configures p to delegate all MeterProvider functionality to +-// provider. +-// +-// All Meters provided prior to this function call are switched out to be +-// Meters provided by provider. All instruments and callbacks are recreated and +-// delegated. +-// +-// It is guaranteed by the caller that this happens only once. +-func (p *meterProvider) setDelegate(provider metric.MeterProvider) { +- p.mtx.Lock() +- defer p.mtx.Unlock() +- +- p.delegate = provider +- +- if len(p.meters) == 0 { +- return +- } +- +- for _, meter := range p.meters { +- meter.setDelegate(provider) +- } +- +- p.meters = nil +-} +- +-// Meter implements MeterProvider. +-func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { +- p.mtx.Lock() +- defer p.mtx.Unlock() +- +- if p.delegate != nil { +- return p.delegate.Meter(name, opts...) +- } +- +- // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. +- +- c := metric.NewMeterConfig(opts...) +- key := il{ +- name: name, +- version: c.InstrumentationVersion(), +- } +- +- if p.meters == nil { +- p.meters = make(map[il]*meter) +- } +- +- if val, ok := p.meters[key]; ok { +- return val +- } +- +- t := &meter{name: name, opts: opts} +- p.meters[key] = t +- return t +-} +- +-// meter is a placeholder for a metric.Meter. +-// +-// All Meter functionality is forwarded to a delegate once configured. +-// Otherwise, all functionality is forwarded to a NoopMeter. 
+-type meter struct { +- name string +- opts []metric.MeterOption +- +- mtx sync.Mutex +- instruments []delegatedInstrument +- callbacks []delegatedCallback +- +- delegate atomic.Value // metric.Meter +-} +- +-type delegatedInstrument interface { +- setDelegate(metric.Meter) +-} +- +-// setDelegate configures m to delegate all Meter functionality to Meters +-// created by provider. +-// +-// All subsequent calls to the Meter methods will be passed to the delegate. +-// +-// It is guaranteed by the caller that this happens only once. +-func (m *meter) setDelegate(provider metric.MeterProvider) { +- meter := provider.Meter(m.name, m.opts...) +- m.delegate.Store(meter) +- +- m.mtx.Lock() +- defer m.mtx.Unlock() +- +- for _, inst := range m.instruments { +- inst.setDelegate(meter) +- } +- +- for _, callback := range m.callbacks { +- callback.setDelegate(meter) +- } +- +- m.instruments = nil +- m.callbacks = nil +-} +- +-// AsyncInt64 is the namespace for the Asynchronous Integer instruments. +-// +-// To Observe data with instruments it must be registered in a callback. +-func (m *meter) AsyncInt64() asyncint64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.AsyncInt64() +- } +- return (*aiInstProvider)(m) +-} +- +-// AsyncFloat64 is the namespace for the Asynchronous Float instruments. +-// +-// To Observe data with instruments it must be registered in a callback. +-func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.AsyncFloat64() +- } +- return (*afInstProvider)(m) +-} +- +-// RegisterCallback captures the function that will be called during Collect. +-// +-// It is only valid to call Observe within the scope of the passed function, +-// and only on the instruments that were registered with this call. +-func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- insts = unwrapInstruments(insts) +- return del.RegisterCallback(insts, function) +- } +- +- m.mtx.Lock() +- defer m.mtx.Unlock() +- m.callbacks = append(m.callbacks, delegatedCallback{ +- instruments: insts, +- function: function, +- }) +- +- return nil +-} +- +-type wrapped interface { +- unwrap() instrument.Asynchronous +-} +- +-func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous { +- out := make([]instrument.Asynchronous, 0, len(instruments)) +- +- for _, inst := range instruments { +- if in, ok := inst.(wrapped); ok { +- out = append(out, in.unwrap()) +- } else { +- out = append(out, inst) +- } +- } +- +- return out +-} +- +-// SyncInt64 is the namespace for the Synchronous Integer instruments. +-func (m *meter) SyncInt64() syncint64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.SyncInt64() +- } +- return (*siInstProvider)(m) +-} +- +-// SyncFloat64 is the namespace for the Synchronous Float instruments. 
+-func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider { +- if del, ok := m.delegate.Load().(metric.Meter); ok { +- return del.SyncFloat64() +- } +- return (*sfInstProvider)(m) +-} +- +-type delegatedCallback struct { +- instruments []instrument.Asynchronous +- function func(context.Context) +-} +- +-func (c *delegatedCallback) setDelegate(m metric.Meter) { +- insts := unwrapInstruments(c.instruments) +- err := m.RegisterCallback(insts, c.function) +- if err != nil { +- otel.Handle(err) +- } +-} +- +-type afInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Gauge creates an instrument for recording the current value. +-func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &afGauge{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-type aiInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Gauge creates an instrument for recording the current value. +-func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &aiGauge{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-type sfInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &sfCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &sfUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Histogram creates an instrument for recording a distribution of values. 
+-func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &sfHistogram{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-type siInstProvider meter +- +-// Counter creates an instrument for recording increasing values. +-func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &siCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// UpDownCounter creates an instrument for recording changes of a value. +-func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &siUpDownCounter{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +- +-// Histogram creates an instrument for recording a distribution of values. +-func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) { +- ip.mtx.Lock() +- defer ip.mtx.Unlock() +- ctr := &siHistogram{name: name, opts: opts} +- ip.instruments = append(ip.instruments, ctr) +- return ctr, nil +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go +deleted file mode 100644 +index 47c0d787d8a..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go ++++ /dev/null +@@ -1,68 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// htmp://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package global // import "go.opentelemetry.io/otel/metric/internal/global" +- +-import ( +- "errors" +- "sync" +- "sync/atomic" +- +- "go.opentelemetry.io/otel/internal/global" +- "go.opentelemetry.io/otel/metric" +-) +- +-var ( +- globalMeterProvider = defaultMeterProvider() +- +- delegateMeterOnce sync.Once +-) +- +-type meterProviderHolder struct { +- mp metric.MeterProvider +-} +- +-// MeterProvider is the internal implementation for global.MeterProvider. +-func MeterProvider() metric.MeterProvider { +- return globalMeterProvider.Load().(meterProviderHolder).mp +-} +- +-// SetMeterProvider is the internal implementation for global.SetMeterProvider. +-func SetMeterProvider(mp metric.MeterProvider) { +- current := MeterProvider() +- if _, cOk := current.(*meterProvider); cOk { +- if _, mpOk := mp.(*meterProvider); mpOk && current == mp { +- // Do not assign the default delegating MeterProvider to delegate +- // to itself. +- global.Error( +- errors.New("no delegate configured in meter provider"), +- "Setting meter provider to it's current value. 
No delegate will be configured", +- ) +- return +- } +- } +- +- delegateMeterOnce.Do(func() { +- if def, ok := current.(*meterProvider); ok { +- def.setDelegate(mp) +- } +- }) +- globalMeterProvider.Store(meterProviderHolder{mp: mp}) +-} +- +-func defaultMeterProvider() *atomic.Value { +- v := &atomic.Value{} +- v.Store(meterProviderHolder{mp: &meterProvider{}}) +- return v +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go +index 21fc1c499fb..2520bc74af1 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/meter.go ++++ b/vendor/go.opentelemetry.io/otel/metric/meter.go +@@ -17,44 +17,196 @@ package metric // import "go.opentelemetry.io/otel/metric" + import ( + "context" + +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" ++ "go.opentelemetry.io/otel/metric/embedded" + ) + + // MeterProvider provides access to named Meter instances, for instrumenting +-// an application or library. ++// an application or package. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type MeterProvider interface { +- // Meter creates an instance of a `Meter` interface. The instrumentationName +- // must be the name of the library providing instrumentation. This name may +- // be the same as the instrumented code only if that code provides built-in +- // instrumentation. If the instrumentationName is empty, then a +- // implementation defined default name will be used instead. +- Meter(instrumentationName string, opts ...MeterOption) Meter ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.MeterProvider ++ ++ // Meter returns a new Meter with the provided name and configuration. ++ // ++ // A Meter should be scoped at most to a single package. The name needs to ++ // be unique so it does not collide with other names used by ++ // an application, nor other applications. To achieve this, the import path ++ // of the instrumentation package is recommended to be used as name. ++ // ++ // If the name is empty, then an implementation defined default name will ++ // be used instead. ++ Meter(name string, opts ...MeterOption) Meter + } + + // Meter provides access to instrument instances for recording metrics. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Meter interface { +- // AsyncInt64 is the namespace for the Asynchronous Integer instruments. ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Meter ++ ++ // Int64Counter returns a new Int64Counter instrument identified by name ++ // and configured with options. The instrument is used to synchronously ++ // record increasing int64 measurements during a computational operation. 
++ Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) ++ // Int64UpDownCounter returns a new Int64UpDownCounter instrument ++ // identified by name and configured with options. The instrument is used ++ // to synchronously record int64 measurements during a computational ++ // operation. ++ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) ++ // Int64Histogram returns a new Int64Histogram instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record the distribution of int64 measurements during a ++ // computational operation. ++ Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) ++ // Int64ObservableCounter returns a new Int64ObservableCounter identified ++ // by name and configured with options. The instrument is used to ++ // asynchronously record increasing int64 measurements once per a ++ // measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) ++ // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter ++ // instrument identified by name and configured with options. The ++ // instrument is used to asynchronously record int64 measurements once per ++ // a measurement collection cycle. + // +- // To Observe data with instruments it must be registered in a callback. +- AsyncInt64() asyncint64.InstrumentProvider ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) ++ // Int64ObservableGauge returns a new Int64ObservableGauge instrument ++ // identified by name and configured with options. The instrument is used ++ // to asynchronously record instantaneous int64 measurements once per a ++ // measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithInt64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + +- // AsyncFloat64 is the namespace for the Asynchronous Float instruments ++ // Float64Counter returns a new Float64Counter instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record increasing float64 measurements during a ++ // computational operation. ++ Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) ++ // Float64UpDownCounter returns a new Float64UpDownCounter instrument ++ // identified by name and configured with options. 
The instrument is used ++ // to synchronously record float64 measurements during a computational ++ // operation. ++ Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) ++ // Float64Histogram returns a new Float64Histogram instrument identified by ++ // name and configured with options. The instrument is used to ++ // synchronously record the distribution of float64 measurements during a ++ // computational operation. ++ Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) ++ // Float64ObservableCounter returns a new Float64ObservableCounter ++ // instrument identified by name and configured with options. The ++ // instrument is used to asynchronously record increasing float64 ++ // measurements once per a measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) ++ // Float64ObservableUpDownCounter returns a new ++ // Float64ObservableUpDownCounter instrument identified by name and ++ // configured with options. The instrument is used to asynchronously record ++ // float64 measurements once per a measurement collection cycle. ++ // ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) ++ // Float64ObservableGauge returns a new Float64ObservableGauge instrument ++ // identified by name and configured with options. The instrument is used ++ // to asynchronously record instantaneous float64 measurements once per a ++ // measurement collection cycle. + // +- // To Observe data with instruments it must be registered in a callback. +- AsyncFloat64() asyncfloat64.InstrumentProvider ++ // Measurements for the returned instrument are made via a callback. Use ++ // the WithFloat64Callback option to register the callback here, or use the ++ // RegisterCallback method of this Meter to register one later. See the ++ // Measurements section of the package documentation for more information. ++ Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + +- // RegisterCallback captures the function that will be called during Collect. ++ // RegisterCallback registers f to be called during the collection of a ++ // measurement cycle. ++ // ++ // If Unregister of the returned Registration is called, f needs to be ++ // unregistered and not called during collection. ++ // ++ // The instruments f is registered with are the only instruments that f may ++ // observe values for. ++ // ++ // If no instruments are passed, f should not be registered nor called ++ // during collection. + // +- // It is only valid to call Observe within the scope of the passed function, +- // and only on the instruments that were registered with this call. 
+- RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error ++ // The function f needs to be concurrent safe. ++ RegisterCallback(f Callback, instruments ...Observable) (Registration, error) ++} ++ ++// Callback is a function registered with a Meter that makes observations for ++// the set of instruments it is registered with. The Observer parameter is used ++// to record measurement observations for these instruments. ++// ++// The function needs to complete in a finite amount of time and the deadline ++// of the passed context is expected to be honored. ++// ++// The function needs to make unique observations across all registered ++// Callbacks. Meaning, it should not report measurements for an instrument with ++// the same attributes as another Callback will report. ++// ++// The function needs to be concurrent safe. ++type Callback func(context.Context, Observer) error ++ ++// Observer records measurements for multiple instruments in a Callback. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Observer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Observer ++ ++ // ObserveFloat64 records the float64 value for obsrv. ++ ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) ++ // ObserveInt64 records the int64 value for obsrv. ++ ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) ++} + +- // SyncInt64 is the namespace for the Synchronous Integer instruments +- SyncInt64() syncint64.InstrumentProvider +- // SyncFloat64 is the namespace for the Synchronous Float instruments +- SyncFloat64() syncfloat64.InstrumentProvider ++// Registration is an token representing the unique registration of a callback ++// for a set of instruments with a Meter. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Registration interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Registration ++ ++ // Unregister removes the callback registration from a Meter. ++ // ++ // This method needs to be idempotent and concurrent safe. ++ Unregister() error + } +diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go +deleted file mode 100644 +index e8b9a9a1458..00000000000 +--- a/vendor/go.opentelemetry.io/otel/metric/noop.go ++++ /dev/null +@@ -1,181 +0,0 @@ +-// Copyright The OpenTelemetry Authors +-// +-// Licensed under the Apache License, Version 2.0 (the "License"); +-// you may not use this file except in compliance with the License. +-// You may obtain a copy of the License at +-// +-// http://www.apache.org/licenses/LICENSE-2.0 +-// +-// Unless required by applicable law or agreed to in writing, software +-// distributed under the License is distributed on an "AS IS" BASIS, +-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-// See the License for the specific language governing permissions and +-// limitations under the License. +- +-package metric // import "go.opentelemetry.io/otel/metric" +- +-import ( +- "context" +- +- "go.opentelemetry.io/otel/attribute" +- "go.opentelemetry.io/otel/metric/instrument" +- "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/asyncint64" +- "go.opentelemetry.io/otel/metric/instrument/syncfloat64" +- "go.opentelemetry.io/otel/metric/instrument/syncint64" +-) +- +-// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. +-func NewNoopMeterProvider() MeterProvider { +- return noopMeterProvider{} +-} +- +-type noopMeterProvider struct{} +- +-func (noopMeterProvider) Meter(string, ...MeterOption) Meter { +- return noopMeter{} +-} +- +-// NewNoopMeter creates a Meter that does not record any metrics. +-func NewNoopMeter() Meter { +- return noopMeter{} +-} +- +-type noopMeter struct{} +- +-// AsyncInt64 creates an instrument that does not record any metrics. +-func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider { +- return nonrecordingAsyncInt64Instrument{} +-} +- +-// AsyncFloat64 creates an instrument that does not record any metrics. +-func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider { +- return nonrecordingAsyncFloat64Instrument{} +-} +- +-// SyncInt64 creates an instrument that does not record any metrics. +-func (noopMeter) SyncInt64() syncint64.InstrumentProvider { +- return nonrecordingSyncInt64Instrument{} +-} +- +-// SyncFloat64 creates an instrument that does not record any metrics. +-func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider { +- return nonrecordingSyncFloat64Instrument{} +-} +- +-// RegisterCallback creates a register callback that does not record any metrics. 
+-func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error { +- return nil +-} +- +-type nonrecordingAsyncFloat64Instrument struct { +- instrument.Asynchronous +-} +- +-var ( +- _ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{} +- _ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{} +-) +- +-func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) { +- return n, nil +-} +- +-func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-type nonrecordingAsyncInt64Instrument struct { +- instrument.Asynchronous +-} +- +-var ( +- _ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.Counter = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{} +- _ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{} +-) +- +-func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) { +- return n, nil +-} +- +-func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) { +-} +- +-type nonrecordingSyncFloat64Instrument struct { +- instrument.Synchronous +-} +- +-var ( +- _ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{} +- _ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{} +-) +- +-func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) { +- return n, nil +-} +- +-func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) { +- +-} +- +-type nonrecordingSyncInt64Instrument struct { +- instrument.Synchronous +-} +- +-var ( +- _ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{} +- _ syncint64.Counter = nonrecordingSyncInt64Instrument{} +- _ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{} +- _ syncint64.Histogram = nonrecordingSyncInt64Instrument{} +-) +- +-func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) { +- return n, nil +-} +- +-func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) { +- return n, nil 
+-} +- +-func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) { +- return n, nil +-} +- +-func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) { +-} +-func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) { +-} +diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +new file mode 100644 +index 00000000000..0a4825ae6a7 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +@@ -0,0 +1,185 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Float64Counter is an instrument that records increasing float64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Counter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Counter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr float64, options ...AddOption) ++} ++ ++// Float64CounterConfig contains options for synchronous counter instruments that ++// record int64 values. ++type Float64CounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts ++// applied. ++func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { ++ var config Float64CounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64Counter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64CounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64CounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Float64CounterOption applies options to a [Float64CounterConfig]. See ++// [InstrumentOption] for other options that can be used as a ++// Float64CounterOption. ++type Float64CounterOption interface { ++ applyFloat64Counter(Float64CounterConfig) Float64CounterConfig ++} ++ ++// Float64UpDownCounter is an instrument that records increasing or decreasing ++// float64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. 
++type Float64UpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64UpDownCounter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr float64, options ...AddOption) ++} ++ ++// Float64UpDownCounterConfig contains options for synchronous counter ++// instruments that record int64 values. ++type Float64UpDownCounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] ++// with all opts applied. ++func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { ++ var config Float64UpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyFloat64UpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64UpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64UpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Float64UpDownCounterOption applies options to a ++// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that ++// can be used as a Float64UpDownCounterOption. ++type Float64UpDownCounterOption interface { ++ applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig ++} ++ ++// Float64Histogram is an instrument that records a distribution of float64 ++// values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Float64Histogram interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Float64Histogram ++ ++ // Record adds an additional value to the distribution. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Record(ctx context.Context, incr float64, options ...RecordOption) ++} ++ ++// Float64HistogramConfig contains options for synchronous counter instruments ++// that record int64 values. ++type Float64HistogramConfig struct { ++ description string ++ unit string ++ explicitBucketBoundaries []float64 ++} ++ ++// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all ++// opts applied. ++func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { ++ var config Float64HistogramConfig ++ for _, o := range opts { ++ config = o.applyFloat64Histogram(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Float64HistogramConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Float64HistogramConfig) Unit() string { ++ return c.unit ++} ++ ++// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
++func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { ++ return c.explicitBucketBoundaries ++} ++ ++// Float64HistogramOption applies options to a [Float64HistogramConfig]. See ++// [InstrumentOption] for other options that can be used as a ++// Float64HistogramOption. ++type Float64HistogramOption interface { ++ applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go +new file mode 100644 +index 00000000000..56667d32fc0 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go +@@ -0,0 +1,185 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package metric // import "go.opentelemetry.io/otel/metric" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/metric/embedded" ++) ++ ++// Int64Counter is an instrument that records increasing int64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Counter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Counter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr int64, options ...AddOption) ++} ++ ++// Int64CounterConfig contains options for synchronous counter instruments that ++// record int64 values. ++type Int64CounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts ++// applied. ++func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { ++ var config Int64CounterConfig ++ for _, o := range opts { ++ config = o.applyInt64Counter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64CounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64CounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Int64CounterOption applies options to a [Int64CounterConfig]. See ++// [InstrumentOption] for other options that can be used as an ++// Int64CounterOption. ++type Int64CounterOption interface { ++ applyInt64Counter(Int64CounterConfig) Int64CounterConfig ++} ++ ++// Int64UpDownCounter is an instrument that records increasing or decreasing ++// int64 values. ++// ++// Warning: Methods may be added to this interface in minor releases. 
See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64UpDownCounter interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64UpDownCounter ++ ++ // Add records a change to the counter. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Add(ctx context.Context, incr int64, options ...AddOption) ++} ++ ++// Int64UpDownCounterConfig contains options for synchronous counter ++// instruments that record int64 values. ++type Int64UpDownCounterConfig struct { ++ description string ++ unit string ++} ++ ++// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with ++// all opts applied. ++func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { ++ var config Int64UpDownCounterConfig ++ for _, o := range opts { ++ config = o.applyInt64UpDownCounter(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64UpDownCounterConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64UpDownCounterConfig) Unit() string { ++ return c.unit ++} ++ ++// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. ++// See [InstrumentOption] for other options that can be used as an ++// Int64UpDownCounterOption. ++type Int64UpDownCounterOption interface { ++ applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig ++} ++ ++// Int64Histogram is an instrument that records a distribution of int64 ++// values. ++// ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. ++type Int64Histogram interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Int64Histogram ++ ++ // Record adds an additional value to the distribution. ++ // ++ // Use the WithAttributeSet (or, if performance is not a concern, ++ // the WithAttributes) option to include measurement attributes. ++ Record(ctx context.Context, incr int64, options ...RecordOption) ++} ++ ++// Int64HistogramConfig contains options for synchronous counter instruments ++// that record int64 values. ++type Int64HistogramConfig struct { ++ description string ++ unit string ++ explicitBucketBoundaries []float64 ++} ++ ++// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts ++// applied. ++func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { ++ var config Int64HistogramConfig ++ for _, o := range opts { ++ config = o.applyInt64Histogram(config) ++ } ++ return config ++} ++ ++// Description returns the configured description. ++func (c Int64HistogramConfig) Description() string { ++ return c.description ++} ++ ++// Unit returns the configured unit. ++func (c Int64HistogramConfig) Unit() string { ++ return c.unit ++} ++ ++// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
++func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { ++ return c.explicitBucketBoundaries ++} ++ ++// Int64HistogramOption applies options to a [Int64HistogramConfig]. See ++// [InstrumentOption] for other options that can be used as an ++// Int64HistogramOption. ++type Int64HistogramOption interface { ++ applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig ++} +diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +index 902692da082..75a8f3435a5 100644 +--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go ++++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +@@ -40,8 +40,10 @@ const ( + // their proprietary information. + type TraceContext struct{} + +-var _ TextMapPropagator = TraceContext{} +-var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") ++var ( ++ _ TextMapPropagator = TraceContext{} ++ traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") ++) + + // Inject set tracecontext from the Context into the carrier. + func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt +new file mode 100644 +index 00000000000..e0a43e13840 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/requirements.txt +@@ -0,0 +1 @@ ++codespell==2.2.6 +diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +index 5e94b8ae521..59dcfab2501 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +@@ -70,8 +70,8 @@ const ( + // returned. + func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { +- value, ok := os.LookupEnv(key) +- if !ok { ++ value := os.Getenv(key) ++ if value == "" { + continue + } + +@@ -88,10 +88,10 @@ func firstInt(defaultValue int, keys ...string) int { + } + + // IntEnvOr returns the int value of the environment variable with name key if +-// it exists and the value is an int. Otherwise, defaultValue is returned. ++// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. + func IntEnvOr(key string, defaultValue int) int { +- value, ok := os.LookupEnv(key) +- if !ok { ++ value := os.Getenv(key) ++ if value == "" { + return defaultValue + } + +diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +new file mode 100644 +index 00000000000..bd84f624b45 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/gen.go +@@ -0,0 +1,29 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package internal // import "go.opentelemetry.io/otel/sdk/internal" ++ ++//go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go ++//go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go ++//go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go ++ ++//go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go ++//go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go +diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +index 84a02306e64..dfeaaa8ca04 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go +@@ -14,16 +14,7 @@ + + package internal // import "go.opentelemetry.io/otel/sdk/internal" + +-import ( +- "fmt" +- "time" +- +- "go.opentelemetry.io/otel" +-) +- +-// UserAgent is the user agent to be added to the outgoing +-// requests from the exporters. +-var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version()) ++import "time" + + // MonotonicEndTime returns the end time at present + // but offset from start, monotonically. +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +index c1d220408ae..4279013be88 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +@@ -18,14 +18,13 @@ import ( + "context" + "errors" + "fmt" ++ "strings" + ) + +-var ( +- // ErrPartialResource is returned by a detector when complete source +- // information for a Resource is unavailable or the source information +- // contains invalid values that are omitted from the returned Resource. +- ErrPartialResource = errors.New("partial resource") +-) ++// ErrPartialResource is returned by a detector when complete source ++// information for a Resource is unavailable or the source information ++// contains invalid values that are omitted from the returned Resource. ++var ErrPartialResource = errors.New("partial resource") + + // Detector detects OpenTelemetry resource information. 
+ type Detector interface { +@@ -45,28 +44,65 @@ type Detector interface { + // Detect calls all input detectors sequentially and merges each result with the previous one. + // It returns the merged error too. + func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { +- var autoDetectedRes *Resource +- var errInfo []string ++ r := new(Resource) ++ return r, detect(ctx, r, detectors) ++} ++ ++// detect runs all detectors using ctx and merges the result into res. This ++// assumes res is allocated and not nil, it will panic otherwise. ++func detect(ctx context.Context, res *Resource, detectors []Detector) error { ++ var ( ++ r *Resource ++ errs detectErrs ++ err error ++ ) ++ + for _, detector := range detectors { + if detector == nil { + continue + } +- res, err := detector.Detect(ctx) ++ r, err = detector.Detect(ctx) + if err != nil { +- errInfo = append(errInfo, err.Error()) ++ errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } +- autoDetectedRes, err = Merge(autoDetectedRes, res) ++ r, err = Merge(res, r) + if err != nil { +- errInfo = append(errInfo, err.Error()) ++ errs = append(errs, err) + } ++ *res = *r ++ } ++ ++ if len(errs) == 0 { ++ return nil ++ } ++ return errs ++} ++ ++type detectErrs []error ++ ++func (e detectErrs) Error() string { ++ errStr := make([]string, len(e)) ++ for i, err := range e { ++ errStr[i] = fmt.Sprintf("* %s", err) + } + +- var aggregatedError error +- if len(errInfo) > 0 { +- aggregatedError = fmt.Errorf("detecting resources: %s", errInfo) ++ format := "%d errors occurred detecting resource:\n\t%s" ++ return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) ++} ++ ++func (e detectErrs) Unwrap() error { ++ switch len(e) { ++ case 0: ++ return nil ++ case 1: ++ return e[0] + } +- return autoDetectedRes, aggregatedError ++ return e[1:] ++} ++ ++func (e detectErrs) Is(target error) bool { ++ return len(e) != 0 && errors.Is(e[0], target) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +index 7af46c61af0..c63a0dd1f8c 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +@@ -20,9 +20,9 @@ import ( + "os" + "path/filepath" + +- "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ "go.opentelemetry.io/otel/sdk" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type ( +@@ -60,9 +60,9 @@ var ( + func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, +- semconv.TelemetrySDKNameKey.String("opentelemetry"), +- semconv.TelemetrySDKLanguageKey.String("go"), +- semconv.TelemetrySDKVersionKey.String(otel.Version()), ++ semconv.TelemetrySDKName("opentelemetry"), ++ semconv.TelemetrySDKLanguageGo, ++ semconv.TelemetrySDKVersion(sdk.Version()), + ), nil + } + +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +index 8e212b12182..f263919f6ec 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +@@ -71,6 +71,11 @@ func WithHost() Option { + return WithDetectors(host{}) + } + ++// WithHostID adds host ID information to the configured resource. 
++func WithHostID() Option { ++ return WithDetectors(hostIDDetector{}) ++} ++ + // WithTelemetrySDK adds TelemetrySDK version info to the configured resource. + func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +@@ -194,6 +199,8 @@ func WithContainer() Option { + } + + // WithContainerID adds an attribute with the id of the container to the configured Resource. ++// Note: WithContainerID will not extract the correct container ID in an ECS environment. ++// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). + func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +index 7a897e96977..3d536228283 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +@@ -22,7 +22,7 @@ import ( + "os" + "regexp" + +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type containerIDProvider func() (string, error) +@@ -47,7 +47,7 @@ func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) + if containerID == "" { + return Empty(), nil + } +- return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil + } + + var ( +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +index 9aab3d83934..d55a50b0dc2 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +@@ -25,4 +25,7 @@ + // OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret + // the value as a list of comma delimited key/value pairs + // (e.g. `=,=,...`). ++// ++// While this package provides a stable API, ++// the attributes added by resource detectors may change. + package resource // import "go.opentelemetry.io/otel/sdk/resource" +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +index eb22d007922..e29ae563a69 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +@@ -17,25 +17,25 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" + import ( + "context" + "fmt" ++ "net/url" + "os" + "strings" + ++ "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. +- resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" ++ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" + ) + +-var ( +- // errMissingValue is returned when a resource value is missing. +- errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) +-) ++// errMissingValue is returned when a resource value is missing. 
++var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + + // fromEnv is a Detector that implements the Detector and collects + // resources from environment. This Detector is included as a +@@ -57,7 +57,7 @@ func (fromEnv) Detect(context.Context) (*Resource, error) { + var res *Resource + + if svcName != "" { +- res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) ++ res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) +@@ -80,16 +80,23 @@ func constructOTResources(s string) (*Resource, error) { + return Empty(), nil + } + pairs := strings.Split(s, ",") +- attrs := []attribute.KeyValue{} ++ var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { +- field := strings.SplitN(p, "=", 2) +- if len(field) != 2 { ++ k, v, found := strings.Cut(p, "=") ++ if !found { + invalid = append(invalid, p) + continue + } +- k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) +- attrs = append(attrs, attribute.String(k, v)) ++ key := strings.TrimSpace(k) ++ val, err := url.PathUnescape(strings.TrimSpace(v)) ++ if err != nil { ++ // Retain original value if decoding fails, otherwise it will be ++ // an empty string. ++ val = v ++ otel.Handle(err) ++ } ++ attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +new file mode 100644 +index 00000000000..fb1ebf2cab2 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +@@ -0,0 +1,120 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import ( ++ "context" ++ "errors" ++ "strings" ++ ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ++) ++ ++type hostIDProvider func() (string, error) ++ ++var defaultHostIDProvider hostIDProvider = platformHostIDReader.read ++ ++var hostID = defaultHostIDProvider ++ ++type hostIDReader interface { ++ read() (string, error) ++} ++ ++type fileReader func(string) (string, error) ++ ++type commandExecutor func(string, ...string) (string, error) ++ ++// hostIDReaderBSD implements hostIDReader. ++type hostIDReaderBSD struct { ++ execCommand commandExecutor ++ readFile fileReader ++} ++ ++// read attempts to read the machine-id from /etc/hostid. If not found it will ++// execute `kenv -q smbios.system.uuid`. If neither location yields an id an ++// error will be returned. ++func (r *hostIDReaderBSD) read() (string, error) { ++ if result, err := r.readFile("/etc/hostid"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ return "", errors.New("host id not found in: /etc/hostid or kenv") ++} ++ ++// hostIDReaderDarwin implements hostIDReader. 
++type hostIDReaderDarwin struct { ++ execCommand commandExecutor ++} ++ ++// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id ++// from the IOPlatformUUID line. If the command fails or the uuid cannot be ++// parsed an error will be returned. ++func (r *hostIDReaderDarwin) read() (string, error) { ++ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") ++ if err != nil { ++ return "", err ++ } ++ ++ lines := strings.Split(result, "\n") ++ for _, line := range lines { ++ if strings.Contains(line, "IOPlatformUUID") { ++ parts := strings.Split(line, " = ") ++ if len(parts) == 2 { ++ return strings.Trim(parts[1], "\""), nil ++ } ++ break ++ } ++ } ++ ++ return "", errors.New("could not parse IOPlatformUUID") ++} ++ ++type hostIDReaderLinux struct { ++ readFile fileReader ++} ++ ++// read attempts to read the machine-id from /etc/machine-id followed by ++// /var/lib/dbus/machine-id. If neither location yields an ID an error will ++// be returned. ++func (r *hostIDReaderLinux) read() (string, error) { ++ if result, err := r.readFile("/etc/machine-id"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { ++ return strings.TrimSpace(result), nil ++ } ++ ++ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") ++} ++ ++type hostIDDetector struct{} ++ ++// Detect returns a *Resource containing the platform specific host id. ++func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { ++ hostID, err := hostID() ++ if err != nil { ++ return nil, err ++ } ++ ++ return NewWithAttributes( ++ semconv.SchemaURL, ++ semconv.HostID(hostID), ++ ), nil ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +similarity index 54% +rename from vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go +rename to vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +index e1bbb850d76..1778bbacf05 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +@@ -12,19 +12,12 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package instrument // import "go.opentelemetry.io/otel/metric/instrument" ++//go:build dragonfly || freebsd || netbsd || openbsd || solaris ++// +build dragonfly freebsd netbsd openbsd solaris + +-// Asynchronous instruments are instruments that are updated within a Callback. +-// If an instrument is observed outside of it's callback it should be an error. +-// +-// This interface is used as a grouping mechanism. +-type Asynchronous interface { +- asynchronous() +-} ++package resource // import "go.opentelemetry.io/otel/sdk/resource" + +-// Synchronous instruments are updated in line with application code. +-// +-// This interface is used as a grouping mechanism. 
+-type Synchronous interface { +- synchronous() ++var platformHostIDReader hostIDReader = &hostIDReaderBSD{ ++ execCommand: execCommand, ++ readFile: readFile, + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +new file mode 100644 +index 00000000000..ba41409b23c +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +@@ -0,0 +1,19 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ ++ execCommand: execCommand, ++} +diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +similarity index 50% +rename from vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go +rename to vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +index b3fd45d9d31..207acb0ed3a 100644 +--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +@@ -12,23 +12,18 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-// Package internal contains common functionality for all OTLP exporters. +-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" ++//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +-import ( +- "fmt" +- "path" +- "strings" +-) ++package resource // import "go.opentelemetry.io/otel/sdk/resource" + +-// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead. +-func CleanPath(urlPath string, defaultPath string) string { +- tmp := path.Clean(strings.TrimSpace(urlPath)) +- if tmp == "." { +- return defaultPath +- } +- if !path.IsAbs(tmp) { +- tmp = fmt.Sprintf("/%s", tmp) ++import "os/exec" ++ ++func execCommand(name string, arg ...string) (string, error) { ++ cmd := exec.Command(name, arg...) ++ b, err := cmd.Output() ++ if err != nil { ++ return "", err + } +- return tmp ++ ++ return string(b), nil + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +new file mode 100644 +index 00000000000..410579b8fc9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +@@ -0,0 +1,22 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. 
++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build linux ++// +build linux ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++var platformHostIDReader hostIDReader = &hostIDReaderLinux{ ++ readFile: readFile, ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +new file mode 100644 +index 00000000000..721e3ca6e7d +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +@@ -0,0 +1,28 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import "os" ++ ++func readFile(filename string) (string, error) { ++ b, err := os.ReadFile(filename) ++ if err != nil { ++ return "", err ++ } ++ ++ return string(b), nil ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +new file mode 100644 +index 00000000000..89df9d6882e +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +@@ -0,0 +1,36 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// +build !darwin ++// +build !dragonfly ++// +build !freebsd ++// +build !linux ++// +build !netbsd ++// +build !openbsd ++// +build !solaris ++// +build !windows ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++// hostIDReaderUnsupported is a placeholder implementation for operating systems ++// for which this project currently doesn't support host.id ++// attribute detection. See build tags declaration early on this file ++// for a list of unsupported OSes. 
++type hostIDReaderUnsupported struct{} ++ ++func (*hostIDReaderUnsupported) read() (string, error) { ++ return "", nil ++} ++ ++var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +new file mode 100644 +index 00000000000..5b431c6ee6e +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +@@ -0,0 +1,48 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++//go:build windows ++// +build windows ++ ++package resource // import "go.opentelemetry.io/otel/sdk/resource" ++ ++import ( ++ "golang.org/x/sys/windows/registry" ++) ++ ++// implements hostIDReader ++type hostIDReaderWindows struct{} ++ ++// read reads MachineGuid from the windows registry key: ++// SOFTWARE\Microsoft\Cryptography ++func (*hostIDReaderWindows) read() (string, error) { ++ k, err := registry.OpenKey( ++ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, ++ registry.QUERY_VALUE|registry.WOW64_64KEY, ++ ) ++ ++ if err != nil { ++ return "", err ++ } ++ defer k.Close() ++ ++ guid, _, err := k.GetStringValue("MachineGuid") ++ if err != nil { ++ return "", err ++ } ++ ++ return guid, nil ++} ++ ++var platformHostIDReader hostIDReader = &hostIDReaderWindows{} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +index 3b4d0c14dbd..0cbd559739c 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +@@ -19,7 +19,7 @@ import ( + "strings" + + "go.opentelemetry.io/otel/attribute" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + + type osDescriptionProvider func() (string, error) +@@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider + } + +-type osTypeDetector struct{} +-type osDescriptionDetector struct{} ++type ( ++ osTypeDetector struct{} ++ osDescriptionDetector struct{} ++) + + // Detect returns a *Resource that describes the operating system type the + // service is running on. +@@ -56,14 +58,13 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + // service is running on. 
+ func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() +- + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, +- semconv.OSDescriptionKey.String(description), ++ semconv.OSDescription(description), + ), nil + } + +@@ -75,6 +76,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ ++ "aix": semconv.OSTypeAIX, + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, +@@ -83,6 +85,7 @@ func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, ++ "zos": semconv.OSTypeZOS, + } + + var osTypeAttribute attribute.KeyValue +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +index fba6790e445..c771942deec 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +@@ -85,14 +85,14 @@ func skip(line string) bool { + // parse attempts to split the provided line on the first '=' character, and then + // sanitize each side of the split before returning them as a key-value pair. + func parse(line string) (string, string, bool) { +- parts := strings.SplitN(line, "=", 2) ++ k, v, found := strings.Cut(line, "=") + +- if len(parts) != 2 || len(parts[0]) == 0 { ++ if !found || len(k) == 0 { + return "", "", false + } + +- key := strings.TrimSpace(parts[0]) +- value := unescape(unquote(strings.TrimSpace(parts[1]))) ++ key := strings.TrimSpace(k) ++ value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +index 9a169f663fb..ecdd11dd762 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +@@ -22,17 +22,19 @@ import ( + "path/filepath" + "runtime" + +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + ) + +-type pidProvider func() int +-type executablePathProvider func() (string, error) +-type commandArgsProvider func() []string +-type ownerProvider func() (*user.User, error) +-type runtimeNameProvider func() string +-type runtimeVersionProvider func() string +-type runtimeOSProvider func() string +-type runtimeArchProvider func() string ++type ( ++ pidProvider func() int ++ executablePathProvider func() (string, error) ++ commandArgsProvider func() []string ++ ownerProvider func() (*user.User, error) ++ runtimeNameProvider func() string ++ runtimeVersionProvider func() string ++ runtimeOSProvider func() string ++ runtimeArchProvider func() string ++) + + var ( + defaultPidProvider pidProvider = os.Getpid +@@ -108,26 +110,28 @@ func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider + } + +-type processPIDDetector struct{} +-type processExecutableNameDetector struct{} +-type processExecutablePathDetector struct{} +-type processCommandArgsDetector struct{} +-type processOwnerDetector struct{} +-type processRuntimeNameDetector struct{} +-type processRuntimeVersionDetector 
struct{} +-type processRuntimeDescriptionDetector struct{} ++type ( ++ processPIDDetector struct{} ++ processExecutableNameDetector struct{} ++ processExecutablePathDetector struct{} ++ processCommandArgsDetector struct{} ++ processOwnerDetector struct{} ++ processRuntimeNameDetector struct{} ++ processRuntimeVersionDetector struct{} ++ processRuntimeDescriptionDetector struct{} ++) + + // Detect returns a *Resource that describes the process identifier (PID) of the + // executing process. + func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil + } + + // Detect returns a *Resource that describes the name of the process executable. + func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil + } + + // Detect returns a *Resource that describes the full path of the process executable. +@@ -137,13 +141,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err + return nil, err + } + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil + } + + // Detect returns a *Resource that describes all the command arguments as received + // by the process. + func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil + } + + // Detect returns a *Resource that describes the username of the user that owns the +@@ -154,18 +158,18 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + return nil, err + } + +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil + } + + // Detect returns a *Resource that describes the name of the compiler used to compile + // this process image. + func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil + } + + // Detect returns a *Resource that describes the version of the runtime of this process. + func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +- return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil ++ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil + } + + // Detect returns a *Resource that describes the runtime of this process. 
+@@ -175,6 +179,6 @@ func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, + + return NewWithAttributes( + semconv.SchemaURL, +- semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), ++ semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +index c425ff05db5..176ff106668 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +@@ -17,7 +17,6 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" + import ( + "context" + "errors" +- "fmt" + "sync" + + "go.opentelemetry.io/otel" +@@ -37,7 +36,6 @@ type Resource struct { + } + + var ( +- emptyResource Resource + defaultResource *Resource + defaultResourceOnce sync.Once + ) +@@ -51,17 +49,8 @@ func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg = opt.apply(cfg) + } + +- resource, err := Detect(ctx, cfg.detectors...) +- +- var err2 error +- resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL}) +- if err == nil { +- err = err2 +- } else if err2 != nil { +- err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) +- } +- +- return resource, err ++ r := &Resource{schemaURL: cfg.schemaURL} ++ return r, detect(ctx, r, cfg.detectors) + } + + // NewWithAttributes creates a resource from attrs and associates the resource with a +@@ -80,18 +69,18 @@ func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource + // of the attrs is known use NewWithAttributes instead. + func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { +- return &emptyResource ++ return &Resource{} + } + + // Ensure attributes comply with the specification: +- // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes ++ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { +- return &emptyResource ++ return &Resource{} + } + + return &Resource{attrs: s} //nolint +@@ -164,7 +153,7 @@ func (r *Resource) Equal(eq *Resource) bool { + // if resource b's value is empty. + // + // The SchemaURL of the resources will be merged according to the spec rules: +-// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge ++// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge + // If the resources have different non-empty schemaURL an empty resource and an error + // will be returned. + func Merge(a, b *Resource) (*Resource, error) { +@@ -205,7 +194,7 @@ func Merge(a, b *Resource) (*Resource, error) { + // Empty returns an instance of Resource with no attributes. It is + // equivalent to a `nil` Resource. + func Empty() *Resource { +- return &emptyResource ++ return &Resource{} + } + + // Default returns an instance of Resource with a default +@@ -224,7 +213,7 @@ func Default() *Resource { + } + // If Detect did not return a valid resource, fall back to emptyResource. 
+ if defaultResource == nil { +- defaultResource = &emptyResource ++ defaultResource = &Resource{} + } + }) + return defaultResource +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +index a2d7db49001..c9c7effbf38 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +@@ -16,7 +16,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" + + import ( + "context" +- "runtime" + "sync" + "sync/atomic" + "time" +@@ -84,6 +83,7 @@ type batchSpanProcessor struct { + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} ++ stopped atomic.Bool + } + + var _ SpanProcessor = (*batchSpanProcessor)(nil) +@@ -91,7 +91,7 @@ var _ SpanProcessor = (*batchSpanProcessor)(nil) + // NewBatchSpanProcessor creates a new SpanProcessor that will send completed + // span batches to the exporter with the supplied options. + // +-// If the exporter is nil, the span processor will preform no action. ++// If the exporter is nil, the span processor will perform no action. + func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) +@@ -137,6 +137,11 @@ func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) + + // OnEnd method enqueues a ReadOnlySpan for later processing. + func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { ++ // Do not enqueue spans after Shutdown. ++ if bsp.stopped.Load() { ++ return ++ } ++ + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return +@@ -149,6 +154,7 @@ func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { ++ bsp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) +@@ -181,11 +187,24 @@ func (f forceFlushSpan) SpanContext() trace.SpanContext { + + // ForceFlush exports all ended spans that have not yet been exported. + func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { ++ // Interrupt if context is already canceled. ++ if err := ctx.Err(); err != nil { ++ return err ++ } ++ ++ // Do nothing after Shutdown. ++ if bsp.stopped.Load() { ++ return nil ++ } ++ + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { ++ case <-bsp.stopCh: ++ // The batchSpanProcessor is Shutdown. ++ return nil + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): +@@ -326,11 +345,9 @@ func (bsp *batchSpanProcessor) drainQueue() { + for { + select { + case sd := <-bsp.queue: +- if sd == nil { +- if err := bsp.exportSpans(ctx); err != nil { +- otel.Handle(err) +- } +- return ++ if _, ok := sd.(forceFlushSpan); ok { ++ // Ignore flush requests as they are not valid spans. ++ continue + } + + bsp.batchMutex.Lock() +@@ -344,7 +361,11 @@ func (bsp *batchSpanProcessor) drainQueue() { + } + } + default: +- close(bsp.queue) ++ // There are no more enqueued spans. Make final export. 
++ if err := bsp.exportSpans(ctx); err != nil { ++ otel.Handle(err) ++ } ++ return + } + } + } +@@ -358,34 +379,11 @@ func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + } + } + +-func recoverSendOnClosedChan() { +- x := recover() +- switch err := x.(type) { +- case nil: +- return +- case runtime.Error: +- if err.Error() == "send on closed channel" { +- return +- } +- } +- panic(x) +-} +- + func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + +- // This ensures the bsp.queue<- below does not panic as the +- // processor shuts down. +- defer recoverSendOnClosedChan() +- +- select { +- case <-bsp.stopCh: +- return false +- default: +- } +- + select { + case bsp.queue <- sd: + return true +@@ -399,16 +397,6 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) + return false + } + +- // This ensures the bsp.queue<- below does not panic as the +- // processor shuts down. +- defer recoverSendOnClosedChan() +- +- select { +- case <-bsp.stopCh: +- return false +- default: +- } +- + select { + case bsp.queue <- sd: + return true +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +index 292ea5481bc..7d46c4b48e5 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +@@ -25,6 +25,8 @@ import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" ++ "go.opentelemetry.io/otel/trace/noop" + ) + + const ( +@@ -73,9 +75,13 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} { + // TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to + // instrumentation so it can trace operational flow through a system. + type TracerProvider struct { ++ embedded.TracerProvider ++ + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer +- spanProcessors atomic.Value ++ spanProcessors atomic.Pointer[spanProcessorStates] ++ ++ isShutdown atomic.Bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. +@@ -116,12 +122,13 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + spanLimits: o.spanLimits, + resource: o.resource, + } +- + global.Info("TracerProvider created", "config", o) + ++ spss := make(spanProcessorStates, 0, len(o.processors)) + for _, sp := range o.processors { +- tp.RegisterSpanProcessor(sp) ++ spss = append(spss, newSpanProcessorState(sp)) + } ++ tp.spanProcessors.Store(&spss) + + return tp + } +@@ -134,10 +141,11 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + // + // This method is safe to be called concurrently. + func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { ++ // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). ++ if p.isShutdown.Load() { ++ return noop.NewTracerProvider().Tracer(name, opts...) ++ } + c := trace.NewTracerConfig(opts...) 
+- +- p.mu.Lock() +- defer p.mu.Unlock() + if name == "" { + name = defaultTracerName + } +@@ -146,57 +154,87 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } +- t, ok := p.namedTracer[is] +- if !ok { +- t = &tracer{ +- provider: p, +- instrumentationScope: is, ++ ++ t, ok := func() (trace.Tracer, bool) { ++ p.mu.Lock() ++ defer p.mu.Unlock() ++ // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran ++ // after the first check above but before we acquired the mutex. ++ if p.isShutdown.Load() { ++ return noop.NewTracerProvider().Tracer(name, opts...), true + } +- p.namedTracer[is] = t +- global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) ++ t, ok := p.namedTracer[is] ++ if !ok { ++ t = &tracer{ ++ provider: p, ++ instrumentationScope: is, ++ } ++ p.namedTracer[is] = t ++ } ++ return t, ok ++ }() ++ if !ok { ++ // This code is outside the mutex to not hold the lock while calling third party logging code: ++ // - That code may do slow things like I/O, which would prolong the duration the lock is held, ++ // slowing down all tracing consumers. ++ // - Logging code may be instrumented with tracing and deadlock because it could try ++ // acquiring the same non-reentrant mutex. ++ global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + } + return t + } + + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +-func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { ++func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { ++ // This check prevents calls during a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } + p.mu.Lock() + defer p.mu.Unlock() +- newSPS := spanProcessorStates{} +- if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { +- newSPS = append(newSPS, old...) +- } +- newSpanSync := &spanProcessorState{ +- sp: s, +- state: &sync.Once{}, ++ // This check prevents calls after a shutdown. ++ if p.isShutdown.Load() { ++ return + } +- newSPS = append(newSPS, newSpanSync) +- p.spanProcessors.Store(newSPS) ++ ++ current := p.getSpanProcessors() ++ newSPS := make(spanProcessorStates, 0, len(current)+1) ++ newSPS = append(newSPS, current...) ++ newSPS = append(newSPS, newSpanProcessorState(sp)) ++ p.spanProcessors.Store(&newSPS) + } + + // UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +-func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { ++func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { ++ // This check prevents calls during a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } + p.mu.Lock() + defer p.mu.Unlock() +- spss := spanProcessorStates{} +- old, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok || len(old) == 0 { ++ // This check prevents calls after a shutdown. ++ if p.isShutdown.Load() { ++ return ++ } ++ old := p.getSpanProcessors() ++ if len(old) == 0 { + return + } +- spss = append(spss, old...) 
++ spss := make(spanProcessorStates, len(old)) ++ copy(spss, old) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { +- if sps.sp == s { ++ if sps.sp == sp { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { +- if err := s.Shutdown(context.Background()); err != nil { ++ if err := sp.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) +@@ -207,16 +245,13 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + +- p.spanProcessors.Store(spss) ++ p.spanProcessors.Store(&spss) + } + + // ForceFlush immediately exports all spans that have not yet been exported for + // all the registered span processors. + func (p *TracerProvider) ForceFlush(ctx context.Context) error { +- spss, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok { +- return fmt.Errorf("failed to load span processors") +- } ++ spss := p.getSpanProcessors() + if len(spss) == 0 { + return nil + } +@@ -235,14 +270,23 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { + return nil + } + +-// Shutdown shuts down the span processors in the order they were registered. ++// Shutdown shuts down TracerProvider. All registered span processors are shut down ++// in the order they were registered and any held computational resources are released. ++// After Shutdown is called, all methods are no-ops. + func (p *TracerProvider) Shutdown(ctx context.Context) error { +- spss, ok := p.spanProcessors.Load().(spanProcessorStates) +- if !ok { +- return fmt.Errorf("failed to load span processors") ++ // This check prevents deadlocks in case of recursive shutdown. ++ if p.isShutdown.Load() { ++ return nil + } ++ p.mu.Lock() ++ defer p.mu.Unlock() ++ // This check prevents calls after a shutdown has already been done concurrently. ++ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? ++ return nil ++ } ++ + var retErr error +- for _, sps := range spss { ++ for _, sps := range p.getSpanProcessors() { + select { + case <-ctx.Done(): + return ctx.Err() +@@ -262,9 +306,14 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { + } + } + } ++ p.spanProcessors.Store(&spanProcessorStates{}) + return retErr + } + ++func (p *TracerProvider) getSpanProcessors() spanProcessorStates { ++ return *(p.spanProcessors.Load()) ++} ++ + // TracerProviderOption configures a TracerProvider. 
+ type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +index a6dcf4b307c..a7bc125b9e8 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +@@ -81,7 +81,7 @@ type traceIDRatioSampler struct { + + func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) +- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 ++ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, +@@ -158,15 +158,15 @@ func NeverSample() Sampler { + return alwaysOffSampler{} + } + +-// ParentBased returns a composite sampler which behaves differently, ++// ParentBased returns a sampler decorator which behaves differently, + // based on the parent of the span. If the span has no parent, +-// the root(Sampler) is used to make sampling decision. If the span has ++// the decorated sampler is used to make sampling decision. If the span has + // a parent, depending on whether the parent is remote and whether it + // is sampled, one of the following samplers will apply: +-// - remoteParentSampled(Sampler) (default: AlwaysOn) +-// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +-// - localParentSampled(Sampler) (default: AlwaysOn) +-// - localParentNotSampled(Sampler) (default: AlwaysOff) ++// - remoteParentSampled(Sampler) (default: AlwaysOn) ++// - remoteParentNotSampled(Sampler) (default: AlwaysOff) ++// - localParentSampled(Sampler) (default: AlwaysOn) ++// - localParentNotSampled(Sampler) (default: AlwaysOff) + func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +index e8530a95932..f8770fff79b 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +@@ -19,12 +19,13 @@ import ( + "sync" + + "go.opentelemetry.io/otel" ++ "go.opentelemetry.io/otel/internal/global" + ) + + // simpleSpanProcessor is a SpanProcessor that synchronously sends all + // completed Spans to a trace.Exporter immediately. + type simpleSpanProcessor struct { +- exporterMu sync.RWMutex ++ exporterMu sync.Mutex + exporter SpanExporter + stopOnce sync.Once + } +@@ -43,6 +44,8 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, + } ++ global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") ++ + return ssp + } + +@@ -51,8 +54,8 @@ func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + + // OnEnd immediately exports a ReadOnlySpan. 
+ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { +- ssp.exporterMu.RLock() +- defer ssp.exporterMu.RUnlock() ++ ssp.exporterMu.Lock() ++ defer ssp.exporterMu.Unlock() + + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +index 449cf6c2552..36dbf67764b 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +@@ -30,8 +30,9 @@ import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/internal" + "go.opentelemetry.io/otel/sdk/resource" +- semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ++ semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // ReadOnlySpan allows reading information from the data structure underlying a +@@ -108,6 +109,8 @@ type ReadWriteSpan interface { + // recordingSpan is an implementation of the OpenTelemetry Span API + // representing the individual component of a trace that is sampled. + type recordingSpan struct { ++ embedded.Span ++ + // mu protects the contents of this span. + mu sync.Mutex + +@@ -158,8 +161,10 @@ type recordingSpan struct { + tracer *tracer + } + +-var _ ReadWriteSpan = (*recordingSpan)(nil) +-var _ runtimeTracer = (*recordingSpan)(nil) ++var ( ++ _ ReadWriteSpan = (*recordingSpan)(nil) ++ _ runtimeTracer = (*recordingSpan)(nil) ++) + + // SpanContext returns the SpanContext of this span. + func (s *recordingSpan) SpanContext() trace.SpanContext { +@@ -189,15 +194,18 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { + if !s.IsRecording() { + return + } ++ s.mu.Lock() ++ defer s.mu.Unlock() ++ if s.status.Code > code { ++ return ++ } + + status := Status{Code: code} + if code == codes.Error { + status.Description = description + } + +- s.mu.Lock() + s.status = status +- s.mu.Unlock() + } + + // SetAttributes sets attributes of this span. +@@ -299,7 +307,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { + // most a length of limit. Each string slice value is truncated in this fashion + // (the slice length itself is unaffected). + // +-// No truncation is perfromed for a negative limit. ++// No truncation is performed for a negative limit. + func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr +@@ -310,26 +318,13 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: +- // Do no mutate the original, make a copy. +- trucated := attr.Key.StringSlice(attr.Value.AsStringSlice()) +- // Do not do this. +- // +- // v := trucated.Value.AsStringSlice() +- // cp := make([]string, len(v)) +- // /* Copy and truncate values to cp ... */ +- // trucated.Value = attribute.StringSliceValue(cp) +- // +- // Copying the []string and then assigning it back as a new value with +- // attribute.StringSliceValue will copy the data twice. Instead, we +- // already made a copy above that only this function owns, update the +- // underlying slice data of our copy. 
+- v := trucated.Value.AsStringSlice() ++ v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } +- return trucated ++ return attr.Key.StringSlice(v) + } + return attr + } +@@ -393,14 +388,14 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { + defer panic(recovered) + opts := []trace.EventOption{ + trace.WithAttributes( +- semconv.ExceptionTypeKey.String(typeStr(recovered)), +- semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)), ++ semconv.ExceptionType(typeStr(recovered)), ++ semconv.ExceptionMessage(fmt.Sprint(recovered)), + ), + } + + if config.StackTrace() { + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionStacktraceKey.String(recordStackTrace()), ++ semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + +@@ -420,14 +415,13 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { + } + s.mu.Unlock() + +- if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok { +- if len(sps) == 0 { +- return +- } +- snap := s.snapshot() +- for _, sp := range sps { +- sp.sp.OnEnd(snap) +- } ++ sps := s.tracer.provider.getSpanProcessors() ++ if len(sps) == 0 { ++ return ++ } ++ snap := s.snapshot() ++ for _, sp := range sps { ++ sp.sp.OnEnd(snap) + } + } + +@@ -441,14 +435,14 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { + } + + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionTypeKey.String(typeStr(err)), +- semconv.ExceptionMessageKey.String(err.Error()), ++ semconv.ExceptionType(typeStr(err)), ++ semconv.ExceptionMessage(err.Error()), + )) + + c := trace.NewEventConfig(opts...) + if c.StackTrace() { + opts = append(opts, trace.WithAttributes( +- semconv.ExceptionStacktraceKey.String(recordStackTrace()), ++ semconv.ExceptionStacktrace(recordStackTrace()), + )) + } + +@@ -783,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + // that wraps a SpanContext. It performs no operations other than to return + // the wrapped SpanContext or TracerProvider that created it. + type nonRecordingSpan struct { ++ embedded.Span ++ + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +index 9fb3d6eac3b..c9bd52f7ad4 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +@@ -38,7 +38,7 @@ type SpanExporter interface { + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The +- // exporter is expected to preform any cleanup or synchronization it ++ // exporter is expected to perform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. 
+ Shutdown(ctx context.Context) error +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +index b649a2ff049..9c53657a719 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +@@ -62,6 +62,11 @@ type SpanProcessor interface { + + type spanProcessorState struct { + sp SpanProcessor +- state *sync.Once ++ state sync.Once + } ++ ++func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { ++ return &spanProcessorState{sp: sp} ++} ++ + type spanProcessorStates []*spanProcessorState +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +index 7b11fc465c6..301e1a7abcc 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +@@ -20,9 +20,12 @@ import ( + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + type tracer struct { ++ embedded.Tracer ++ + provider *TracerProvider + instrumentationScope instrumentation.Scope + } +@@ -51,7 +54,7 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS + + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { +- sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) ++ sps := tr.provider.getSpanProcessors() + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +index bfe73de9c41..ae8eae8e8be 100644 +--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +@@ -162,6 +162,7 @@ func (s spanSnapshot) Resource() *resource.Resource { return s.resource } + func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope + } ++ + func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope + } +diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +new file mode 100644 +index 00000000000..d3457ed1355 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package trace // import "go.opentelemetry.io/otel/sdk/trace" ++ ++// version is the current release version of the metric SDK in use. 
++func version() string { ++ return "1.16.0-rc.1" ++} +diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go +new file mode 100644 +index 00000000000..7048c788e93 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/sdk/version.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package sdk // import "go.opentelemetry.io/otel/sdk" ++ ++// Version is the current release version of the OpenTelemetry SDK in use. ++func Version() string { ++ return "1.20.0" ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +index b580eedeff7..19c394c69b6 100644 +--- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go ++++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +@@ -232,10 +232,12 @@ func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, r + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } +- if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { +- if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { +- attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) ++ if values := request.Header["X-Forwarded-For"]; len(values) > 0 { ++ addr := values[0] ++ if i := strings.Index(addr, ","); i > 0 { ++ addr = addr[:i] + } ++ attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +new file mode 100644 +index 00000000000..71a1f7748d5 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package semconv implements OpenTelemetry semantic conventions. ++// ++// OpenTelemetry semantic conventions are agreed standardized naming ++// patterns for OpenTelemetry things. This package represents the conventions ++// as of the v1.17.0 version of the OpenTelemetry specification. 
++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +new file mode 100644 +index 00000000000..679c40c4de4 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +@@ -0,0 +1,199 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// This semantic convention defines the attributes used to represent a feature ++// flag evaluation as an event. ++const ( ++ // FeatureFlagKeyKey is the attribute Key conforming to the ++ // "feature_flag.key" semantic conventions. It represents the unique ++ // identifier of the feature flag. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'logo-color' ++ FeatureFlagKeyKey = attribute.Key("feature_flag.key") ++ ++ // FeatureFlagProviderNameKey is the attribute Key conforming to the ++ // "feature_flag.provider_name" semantic conventions. It represents the ++ // name of the service provider that performs the flag evaluation. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'Flag Manager' ++ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") ++ ++ // FeatureFlagVariantKey is the attribute Key conforming to the ++ // "feature_flag.variant" semantic conventions. It represents the sHOULD be ++ // a semantic identifier for a value. If one is unavailable, a stringified ++ // version of the value can be used. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'red', 'true', 'on' ++ // Note: A semantic identifier, commonly referred to as a variant, provides ++ // a means ++ // for referring to a value without including the value itself. This can ++ // provide additional context for understanding the meaning behind a value. ++ // For example, the variant `red` maybe be used for the value `#c05543`. ++ // ++ // A stringified version of the value can be used in situations where a ++ // semantic identifier is unavailable. String representation of the value ++ // should be determined by the implementer. ++ FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ++) ++ ++// FeatureFlagKey returns an attribute KeyValue conforming to the ++// "feature_flag.key" semantic conventions. It represents the unique identifier ++// of the feature flag. ++func FeatureFlagKey(val string) attribute.KeyValue { ++ return FeatureFlagKeyKey.String(val) ++} ++ ++// FeatureFlagProviderName returns an attribute KeyValue conforming to the ++// "feature_flag.provider_name" semantic conventions. It represents the name of ++// the service provider that performs the flag evaluation. 
++func FeatureFlagProviderName(val string) attribute.KeyValue { ++ return FeatureFlagProviderNameKey.String(val) ++} ++ ++// FeatureFlagVariant returns an attribute KeyValue conforming to the ++// "feature_flag.variant" semantic conventions. It represents the sHOULD be a ++// semantic identifier for a value. If one is unavailable, a stringified ++// version of the value can be used. ++func FeatureFlagVariant(val string) attribute.KeyValue { ++ return FeatureFlagVariantKey.String(val) ++} ++ ++// RPC received/sent message. ++const ( ++ // MessageTypeKey is the attribute Key conforming to the "message.type" ++ // semantic conventions. It represents the whether this is a received or ++ // sent message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageTypeKey = attribute.Key("message.type") ++ ++ // MessageIDKey is the attribute Key conforming to the "message.id" ++ // semantic conventions. It represents the mUST be calculated as two ++ // different counters starting from `1` one for sent messages and one for ++ // received message. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This way we guarantee that the values will be consistent between ++ // different implementations. ++ MessageIDKey = attribute.Key("message.id") ++ ++ // MessageCompressedSizeKey is the attribute Key conforming to the ++ // "message.compressed_size" semantic conventions. It represents the ++ // compressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageCompressedSizeKey = attribute.Key("message.compressed_size") ++ ++ // MessageUncompressedSizeKey is the attribute Key conforming to the ++ // "message.uncompressed_size" semantic conventions. It represents the ++ // uncompressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ++) ++ ++var ( ++ // sent ++ MessageTypeSent = MessageTypeKey.String("SENT") ++ // received ++ MessageTypeReceived = MessageTypeKey.String("RECEIVED") ++) ++ ++// MessageID returns an attribute KeyValue conforming to the "message.id" ++// semantic conventions. It represents the mUST be calculated as two different ++// counters starting from `1` one for sent messages and one for received ++// message. ++func MessageID(val int) attribute.KeyValue { ++ return MessageIDKey.Int(val) ++} ++ ++// MessageCompressedSize returns an attribute KeyValue conforming to the ++// "message.compressed_size" semantic conventions. It represents the compressed ++// size of the message in bytes. ++func MessageCompressedSize(val int) attribute.KeyValue { ++ return MessageCompressedSizeKey.Int(val) ++} ++ ++// MessageUncompressedSize returns an attribute KeyValue conforming to the ++// "message.uncompressed_size" semantic conventions. It represents the ++// uncompressed size of the message in bytes. ++func MessageUncompressedSize(val int) attribute.KeyValue { ++ return MessageUncompressedSizeKey.Int(val) ++} ++ ++// The attributes used to report a single exception associated with a span. ++const ( ++ // ExceptionEscapedKey is the attribute Key conforming to the ++ // "exception.escaped" semantic conventions. It represents the sHOULD be ++ // set to true if the exception event is recorded at a point where it is ++ // known that the exception is escaping the scope of the span. 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: An exception is considered to have escaped (or left) the scope of ++ // a span, ++ // if that span is ended while the exception is still logically "in ++ // flight". ++ // This may be actually "in flight" in some languages (e.g. if the ++ // exception ++ // is passed to a Context manager's `__exit__` method in Python) but will ++ // usually be caught at the point of recording the exception in most ++ // languages. ++ // ++ // It is usually not possible to determine at the point where an exception ++ // is thrown ++ // whether it will escape the scope of a span. ++ // However, it is trivial to know that an exception ++ // will escape, if one checks for an active exception just before ending ++ // the span, ++ // as done in the [example above](#recording-an-exception). ++ // ++ // It follows that an exception may still escape the scope of the span ++ // even if the `exception.escaped` attribute was not set or set to false, ++ // since the event might have been recorded at a time where it was not ++ // clear whether the exception will escape. ++ ExceptionEscapedKey = attribute.Key("exception.escaped") ++) ++ ++// ExceptionEscaped returns an attribute KeyValue conforming to the ++// "exception.escaped" semantic conventions. It represents the sHOULD be set to ++// true if the exception event is recorded at a point where it is known that ++// the exception is escaping the scope of the span. ++func ExceptionEscaped(val bool) attribute.KeyValue { ++ return ExceptionEscapedKey.Bool(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +similarity index 70% +rename from vendor/go.opentelemetry.io/otel/metric/unit/unit.go +rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +index 647d77302de..9b8c559de42 100644 +--- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +@@ -12,14 +12,9 @@ + // See the License for the specific language governing permissions and + // limitations under the License. + +-package unit // import "go.opentelemetry.io/otel/metric/unit" ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +-// Unit is a determinate standard quantity of measurement. +-type Unit string +- +-// Units defined by OpenTelemetry. + const ( +- Dimensionless Unit = "1" +- Bytes Unit = "By" +- Milliseconds Unit = "ms" ++ // ExceptionEventName is the name of the Span event representing an exception. ++ ExceptionEventName = "exception" + ) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +new file mode 100644 +index 00000000000..d5c4b5c136a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +@@ -0,0 +1,21 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++// HTTP scheme attributes. ++var ( ++ HTTPSchemeHTTP = HTTPSchemeKey.String("http") ++ HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ++) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +new file mode 100644 +index 00000000000..39a2eab3a6a +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +@@ -0,0 +1,2010 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The web browser in which the application represented by the resource is ++// running. The `browser.*` attributes MUST be used only for resources that ++// represent applications running in a web browser (regardless of whether ++// running on a mobile or desktop device). ++const ( ++ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" ++ // semantic conventions. It represents the array of brand name and version ++ // separated by a space ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.brands`). ++ BrowserBrandsKey = attribute.Key("browser.brands") ++ ++ // BrowserPlatformKey is the attribute Key conforming to the ++ // "browser.platform" semantic conventions. It represents the platform on ++ // which the browser is running ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Windows', 'macOS', 'Android' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.platform`). If unavailable, the legacy ++ // `navigator.platform` API SHOULD NOT be used instead and this attribute ++ // SHOULD be left unset in order for the values to be consistent. ++ // The list of possible values is defined in the [W3C User-Agent Client ++ // Hints ++ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). ++ // Note that some (but not all) of these values can overlap with values in ++ // the [`os.type` and `os.name` attributes](./os.md). However, for ++ // consistency, the values in the `browser.platform` attribute should ++ // capture the exact value that the user agent provides. ++ BrowserPlatformKey = attribute.Key("browser.platform") ++ ++ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" ++ // semantic conventions. 
It represents a boolean that is true if the ++ // browser is running on a mobile device ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.mobile`). If unavailable, this attribute ++ // SHOULD be left unset. ++ BrowserMobileKey = attribute.Key("browser.mobile") ++ ++ // BrowserUserAgentKey is the attribute Key conforming to the ++ // "browser.user_agent" semantic conventions. It represents the full ++ // user-agent string provided by the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) ++ // AppleWebKit/537.36 (KHTML, ' ++ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' ++ // Note: The user-agent value SHOULD be provided only from browsers that do ++ // not have a mechanism to retrieve brands and platform individually from ++ // the User-Agent Client Hints API. To retrieve the value, the legacy ++ // `navigator.userAgent` API can be used. ++ BrowserUserAgentKey = attribute.Key("browser.user_agent") ++ ++ // BrowserLanguageKey is the attribute Key conforming to the ++ // "browser.language" semantic conventions. It represents the preferred ++ // language of the user using the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'en', 'en-US', 'fr', 'fr-FR' ++ // Note: This value is intended to be taken from the Navigator API ++ // `navigator.language`. ++ BrowserLanguageKey = attribute.Key("browser.language") ++) ++ ++// BrowserBrands returns an attribute KeyValue conforming to the ++// "browser.brands" semantic conventions. It represents the array of brand name ++// and version separated by a space ++func BrowserBrands(val ...string) attribute.KeyValue { ++ return BrowserBrandsKey.StringSlice(val) ++} ++ ++// BrowserPlatform returns an attribute KeyValue conforming to the ++// "browser.platform" semantic conventions. It represents the platform on which ++// the browser is running ++func BrowserPlatform(val string) attribute.KeyValue { ++ return BrowserPlatformKey.String(val) ++} ++ ++// BrowserMobile returns an attribute KeyValue conforming to the ++// "browser.mobile" semantic conventions. It represents a boolean that is true ++// if the browser is running on a mobile device ++func BrowserMobile(val bool) attribute.KeyValue { ++ return BrowserMobileKey.Bool(val) ++} ++ ++// BrowserUserAgent returns an attribute KeyValue conforming to the ++// "browser.user_agent" semantic conventions. It represents the full user-agent ++// string provided by the browser ++func BrowserUserAgent(val string) attribute.KeyValue { ++ return BrowserUserAgentKey.String(val) ++} ++ ++// BrowserLanguage returns an attribute KeyValue conforming to the ++// "browser.language" semantic conventions. It represents the preferred ++// language of the user using the browser ++func BrowserLanguage(val string) attribute.KeyValue { ++ return BrowserLanguageKey.String(val) ++} ++ ++// A cloud environment (e.g. GCP, Azure, AWS) ++const ( ++ // CloudProviderKey is the attribute Key conforming to the "cloud.provider" ++ // semantic conventions. It represents the name of the cloud provider. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ CloudProviderKey = attribute.Key("cloud.provider") ++ ++ // CloudAccountIDKey is the attribute Key conforming to the ++ // "cloud.account.id" semantic conventions. It represents the cloud account ++ // ID the resource is assigned to. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '111111111111', 'opentelemetry' ++ CloudAccountIDKey = attribute.Key("cloud.account.id") ++ ++ // CloudRegionKey is the attribute Key conforming to the "cloud.region" ++ // semantic conventions. It represents the geographical region the resource ++ // is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-central1', 'us-east-1' ++ // Note: Refer to your provider's docs to see the available regions, for ++ // example [Alibaba Cloud ++ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS ++ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), ++ // [Azure ++ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), ++ // [Google Cloud regions](https://cloud.google.com/about/locations), or ++ // [Tencent Cloud ++ // regions](https://intl.cloud.tencent.com/document/product/213/6091). ++ CloudRegionKey = attribute.Key("cloud.region") ++ ++ // CloudAvailabilityZoneKey is the attribute Key conforming to the ++ // "cloud.availability_zone" semantic conventions. It represents the cloud ++ // regions often have multiple, isolated locations known as zones to ++ // increase availability. Availability zone represents the zone where the ++ // resource is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-east-1c' ++ // Note: Availability zones are called "zones" on Alibaba Cloud and Google ++ // Cloud. ++ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") ++ ++ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" ++ // semantic conventions. It represents the cloud platform in use. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The prefix of the service SHOULD match the one specified in ++ // `cloud.provider`. 
++ CloudPlatformKey = attribute.Key("cloud.platform") ++) ++ ++var ( ++ // Alibaba Cloud ++ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ CloudProviderAWS = CloudProviderKey.String("aws") ++ // Microsoft Azure ++ CloudProviderAzure = CloudProviderKey.String("azure") ++ // Google Cloud Platform ++ CloudProviderGCP = CloudProviderKey.String("gcp") ++ // IBM Cloud ++ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") ++ // Tencent Cloud ++ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ++) ++ ++var ( ++ // Alibaba Cloud Elastic Compute Service ++ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") ++ // Alibaba Cloud Function Compute ++ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") ++ // Red Hat OpenShift on Alibaba Cloud ++ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") ++ // AWS Elastic Compute Cloud ++ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") ++ // AWS Elastic Container Service ++ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") ++ // AWS Elastic Kubernetes Service ++ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") ++ // AWS Lambda ++ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") ++ // AWS Elastic Beanstalk ++ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") ++ // AWS App Runner ++ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") ++ // Red Hat OpenShift on AWS (ROSA) ++ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") ++ // Azure Virtual Machines ++ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") ++ // Azure Container Instances ++ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") ++ // Azure Kubernetes Service ++ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") ++ // Azure Functions ++ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") ++ // Azure App Service ++ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") ++ // Azure Red Hat OpenShift ++ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") ++ // Google Cloud Compute Engine (GCE) ++ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") ++ // Google Cloud Run ++ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") ++ // Google Cloud Kubernetes Engine (GKE) ++ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") ++ // Google Cloud Functions (GCF) ++ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") ++ // Google Cloud App Engine (GAE) ++ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ++ // Red Hat OpenShift on Google Cloud ++ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") ++ // Red Hat OpenShift on IBM Cloud ++ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") ++ // Tencent Cloud Cloud Virtual Machine (CVM) ++ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") ++ // Tencent Cloud Elastic Kubernetes Service (EKS) ++ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") ++ // Tencent Cloud Serverless Cloud Function (SCF) ++ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ++) ++ ++// CloudAccountID returns an attribute KeyValue conforming to the ++// 
"cloud.account.id" semantic conventions. It represents the cloud account ID ++// the resource is assigned to. ++func CloudAccountID(val string) attribute.KeyValue { ++ return CloudAccountIDKey.String(val) ++} ++ ++// CloudRegion returns an attribute KeyValue conforming to the ++// "cloud.region" semantic conventions. It represents the geographical region ++// the resource is running. ++func CloudRegion(val string) attribute.KeyValue { ++ return CloudRegionKey.String(val) ++} ++ ++// CloudAvailabilityZone returns an attribute KeyValue conforming to the ++// "cloud.availability_zone" semantic conventions. It represents the cloud ++// regions often have multiple, isolated locations known as zones to increase ++// availability. Availability zone represents the zone where the resource is ++// running. ++func CloudAvailabilityZone(val string) attribute.KeyValue { ++ return CloudAvailabilityZoneKey.String(val) ++} ++ ++// Resources used by AWS Elastic Container Service (ECS). ++const ( ++ // AWSECSContainerARNKey is the attribute Key conforming to the ++ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++ // Resource Name (ARN) of an [ECS container ++ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' ++ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") ++ ++ // AWSECSClusterARNKey is the attribute Key conforming to the ++ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an ++ // [ECS ++ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") ++ ++ // AWSECSLaunchtypeKey is the attribute Key conforming to the ++ // "aws.ecs.launchtype" semantic conventions. It represents the [launch ++ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) ++ // for an ECS task. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") ++ ++ // AWSECSTaskARNKey is the attribute Key conforming to the ++ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an ++ // [ECS task ++ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") ++ ++ // AWSECSTaskFamilyKey is the attribute Key conforming to the ++ // "aws.ecs.task.family" semantic conventions. It represents the task ++ // definition family this task definition is a member of. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-family' ++ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") ++ ++ // AWSECSTaskRevisionKey is the attribute Key conforming to the ++ // "aws.ecs.task.revision" semantic conventions. It represents the revision ++ // for this task definition. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '8', '26' ++ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ++) ++ ++var ( ++ // ec2 ++ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") ++ // fargate ++ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ++) ++ ++// AWSECSContainerARN returns an attribute KeyValue conforming to the ++// "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++// Resource Name (ARN) of an [ECS container ++// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++func AWSECSContainerARN(val string) attribute.KeyValue { ++ return AWSECSContainerARNKey.String(val) ++} ++ ++// AWSECSClusterARN returns an attribute KeyValue conforming to the ++// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS ++// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++func AWSECSClusterARN(val string) attribute.KeyValue { ++ return AWSECSClusterARNKey.String(val) ++} ++ ++// AWSECSTaskARN returns an attribute KeyValue conforming to the ++// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS ++// task ++// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++func AWSECSTaskARN(val string) attribute.KeyValue { ++ return AWSECSTaskARNKey.String(val) ++} ++ ++// AWSECSTaskFamily returns an attribute KeyValue conforming to the ++// "aws.ecs.task.family" semantic conventions. It represents the task ++// definition family this task definition is a member of. ++func AWSECSTaskFamily(val string) attribute.KeyValue { ++ return AWSECSTaskFamilyKey.String(val) ++} ++ ++// AWSECSTaskRevision returns an attribute KeyValue conforming to the ++// "aws.ecs.task.revision" semantic conventions. It represents the revision for ++// this task definition. ++func AWSECSTaskRevision(val string) attribute.KeyValue { ++ return AWSECSTaskRevisionKey.String(val) ++} ++ ++// Resources used by AWS Elastic Kubernetes Service (EKS). ++const ( ++ // AWSEKSClusterARNKey is the attribute Key conforming to the ++ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an ++ // EKS cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ++) ++ ++// AWSEKSClusterARN returns an attribute KeyValue conforming to the ++// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS ++// cluster. ++func AWSEKSClusterARN(val string) attribute.KeyValue { ++ return AWSEKSClusterARNKey.String(val) ++} ++ ++// Resources specific to Amazon Web Services. ++const ( ++ // AWSLogGroupNamesKey is the attribute Key conforming to the ++ // "aws.log.group.names" semantic conventions. It represents the name(s) of ++ // the AWS log group(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/aws/lambda/my-function', 'opentelemetry-service' ++ // Note: Multiple log groups must be supported for cases like ++ // multi-container applications, where a single application has sidecar ++ // containers, and each write to their own log group. 
++ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") ++ ++ // AWSLogGroupARNsKey is the attribute Key conforming to the ++ // "aws.log.group.arns" semantic conventions. It represents the Amazon ++ // Resource Name(s) (ARN) of the AWS log group(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' ++ // Note: See the [log group ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") ++ ++ // AWSLogStreamNamesKey is the attribute Key conforming to the ++ // "aws.log.stream.names" semantic conventions. It represents the name(s) ++ // of the AWS log stream(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") ++ ++ // AWSLogStreamARNsKey is the attribute Key conforming to the ++ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of ++ // the AWS log stream(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ // Note: See the [log stream ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ // One log group can contain several log streams, so these ARNs necessarily ++ // identify both a log group and a log stream. ++ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ++) ++ ++// AWSLogGroupNames returns an attribute KeyValue conforming to the ++// "aws.log.group.names" semantic conventions. It represents the name(s) of the ++// AWS log group(s) an application is writing to. ++func AWSLogGroupNames(val ...string) attribute.KeyValue { ++ return AWSLogGroupNamesKey.StringSlice(val) ++} ++ ++// AWSLogGroupARNs returns an attribute KeyValue conforming to the ++// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource ++// Name(s) (ARN) of the AWS log group(s). ++func AWSLogGroupARNs(val ...string) attribute.KeyValue { ++ return AWSLogGroupARNsKey.StringSlice(val) ++} ++ ++// AWSLogStreamNames returns an attribute KeyValue conforming to the ++// "aws.log.stream.names" semantic conventions. It represents the name(s) of ++// the AWS log stream(s) an application is writing to. ++func AWSLogStreamNames(val ...string) attribute.KeyValue { ++ return AWSLogStreamNamesKey.StringSlice(val) ++} ++ ++// AWSLogStreamARNs returns an attribute KeyValue conforming to the ++// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the ++// AWS log stream(s). ++func AWSLogStreamARNs(val ...string) attribute.KeyValue { ++ return AWSLogStreamARNsKey.StringSlice(val) ++} ++ ++// A container instance. ++const ( ++ // ContainerNameKey is the attribute Key conforming to the "container.name" ++ // semantic conventions. It represents the container name used by container ++ // runtime. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-autoconf' ++ ContainerNameKey = attribute.Key("container.name") ++ ++ // ContainerIDKey is the attribute Key conforming to the "container.id" ++ // semantic conventions. It represents the container ID. Usually a UUID, as ++ // for example used to [identify Docker ++ // containers](https://docs.docker.com/engine/reference/run/#container-identification). ++ // The UUID might be abbreviated. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'a3bf90e006b2' ++ ContainerIDKey = attribute.Key("container.id") ++ ++ // ContainerRuntimeKey is the attribute Key conforming to the ++ // "container.runtime" semantic conventions. It represents the container ++ // runtime managing this container. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'docker', 'containerd', 'rkt' ++ ContainerRuntimeKey = attribute.Key("container.runtime") ++ ++ // ContainerImageNameKey is the attribute Key conforming to the ++ // "container.image.name" semantic conventions. It represents the name of ++ // the image the container was built on. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'gcr.io/opentelemetry/operator' ++ ContainerImageNameKey = attribute.Key("container.image.name") ++ ++ // ContainerImageTagKey is the attribute Key conforming to the ++ // "container.image.tag" semantic conventions. It represents the container ++ // image tag. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ ContainerImageTagKey = attribute.Key("container.image.tag") ++) ++ ++// ContainerName returns an attribute KeyValue conforming to the ++// "container.name" semantic conventions. It represents the container name used ++// by container runtime. ++func ContainerName(val string) attribute.KeyValue { ++ return ContainerNameKey.String(val) ++} ++ ++// ContainerID returns an attribute KeyValue conforming to the ++// "container.id" semantic conventions. It represents the container ID. Usually ++// a UUID, as for example used to [identify Docker ++// containers](https://docs.docker.com/engine/reference/run/#container-identification). ++// The UUID might be abbreviated. ++func ContainerID(val string) attribute.KeyValue { ++ return ContainerIDKey.String(val) ++} ++ ++// ContainerRuntime returns an attribute KeyValue conforming to the ++// "container.runtime" semantic conventions. It represents the container ++// runtime managing this container. ++func ContainerRuntime(val string) attribute.KeyValue { ++ return ContainerRuntimeKey.String(val) ++} ++ ++// ContainerImageName returns an attribute KeyValue conforming to the ++// "container.image.name" semantic conventions. It represents the name of the ++// image the container was built on. ++func ContainerImageName(val string) attribute.KeyValue { ++ return ContainerImageNameKey.String(val) ++} ++ ++// ContainerImageTag returns an attribute KeyValue conforming to the ++// "container.image.tag" semantic conventions. It represents the container ++// image tag. ++func ContainerImageTag(val string) attribute.KeyValue { ++ return ContainerImageTagKey.String(val) ++} ++ ++// The software deployment. ++const ( ++ // DeploymentEnvironmentKey is the attribute Key conforming to the ++ // "deployment.environment" semantic conventions. 
It represents the name of ++ // the [deployment ++ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++ // deployment tier). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'staging', 'production' ++ DeploymentEnvironmentKey = attribute.Key("deployment.environment") ++) ++ ++// DeploymentEnvironment returns an attribute KeyValue conforming to the ++// "deployment.environment" semantic conventions. It represents the name of the ++// [deployment ++// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++// deployment tier). ++func DeploymentEnvironment(val string) attribute.KeyValue { ++ return DeploymentEnvironmentKey.String(val) ++} ++ ++// The device on which the process represented by this resource is running. ++const ( ++ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic ++ // conventions. It represents a unique identifier representing the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' ++ // Note: The device identifier MUST only be defined using the values ++ // outlined below. This value is not an advertising identifier and MUST NOT ++ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal ++ // to the [vendor ++ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). ++ // On Android (Java or Kotlin), this value MUST be equal to the Firebase ++ // Installation ID or a globally unique UUID which is persisted across ++ // sessions in your application. More information can be found ++ // [here](https://developer.android.com/training/articles/user-data-ids) on ++ // best practices and exact implementation details. Caution should be taken ++ // when storing personal data or anything which can identify a user. GDPR ++ // and data protection laws may apply, ensure you do your own due ++ // diligence. ++ DeviceIDKey = attribute.Key("device.id") ++ ++ // DeviceModelIdentifierKey is the attribute Key conforming to the ++ // "device.model.identifier" semantic conventions. It represents the model ++ // identifier for the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone3,4', 'SM-G920F' ++ // Note: It's recommended this value represents a machine readable version ++ // of the model identifier rather than the market or consumer-friendly name ++ // of the device. ++ DeviceModelIdentifierKey = attribute.Key("device.model.identifier") ++ ++ // DeviceModelNameKey is the attribute Key conforming to the ++ // "device.model.name" semantic conventions. It represents the marketing ++ // name for the device model ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' ++ // Note: It's recommended this value represents a human readable version of ++ // the device model rather than a machine readable alternative. ++ DeviceModelNameKey = attribute.Key("device.model.name") ++ ++ // DeviceManufacturerKey is the attribute Key conforming to the ++ // "device.manufacturer" semantic conventions. 
It represents the name of ++ // the device manufacturer ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Apple', 'Samsung' ++ // Note: The Android OS provides this field via ++ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). ++ // iOS apps SHOULD hardcode the value `Apple`. ++ DeviceManufacturerKey = attribute.Key("device.manufacturer") ++) ++ ++// DeviceID returns an attribute KeyValue conforming to the "device.id" ++// semantic conventions. It represents a unique identifier representing the ++// device ++func DeviceID(val string) attribute.KeyValue { ++ return DeviceIDKey.String(val) ++} ++ ++// DeviceModelIdentifier returns an attribute KeyValue conforming to the ++// "device.model.identifier" semantic conventions. It represents the model ++// identifier for the device ++func DeviceModelIdentifier(val string) attribute.KeyValue { ++ return DeviceModelIdentifierKey.String(val) ++} ++ ++// DeviceModelName returns an attribute KeyValue conforming to the ++// "device.model.name" semantic conventions. It represents the marketing name ++// for the device model ++func DeviceModelName(val string) attribute.KeyValue { ++ return DeviceModelNameKey.String(val) ++} ++ ++// DeviceManufacturer returns an attribute KeyValue conforming to the ++// "device.manufacturer" semantic conventions. It represents the name of the ++// device manufacturer ++func DeviceManufacturer(val string) attribute.KeyValue { ++ return DeviceManufacturerKey.String(val) ++} ++ ++// A serverless instance. ++const ( ++ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic ++ // conventions. It represents the name of the single function that this ++ // runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function', 'myazurefunctionapp/some-function-name' ++ // Note: This is the name of the function as configured/deployed on the ++ // FaaS ++ // platform and is usually different from the name of the callback ++ // function (which may be stored in the ++ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) ++ // span attributes). ++ // ++ // For some cloud providers, the above definition is ambiguous. The ++ // following ++ // definition of function name MUST be used for this attribute ++ // (and consequently the span name) for the listed cloud ++ // providers/products: ++ // ++ // * **Azure:** The full name `/`, i.e., function app name ++ // followed by a forward slash followed by the function name (this form ++ // can also be seen in the resource JSON for the function). ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider (see also the `faas.id` attribute). ++ FaaSNameKey = attribute.Key("faas.name") ++ ++ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic ++ // conventions. It represents the unique ID of the single function that ++ // this runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' ++ // Note: On some cloud providers, it may not be possible to determine the ++ // full ID at startup, ++ // so consider setting `faas.id` as a span attribute instead. 
++ // ++ // The exact value to use for `faas.id` depends on the cloud provider: ++ // ++ // * **AWS Lambda:** The function ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). ++ // Take care not to use the "invoked ARN" directly but replace any ++ // [alias ++ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) ++ // with the resolved function version, as the same runtime instance may ++ // be invokable with ++ // multiple different aliases. ++ // * **GCP:** The [URI of the ++ // resource](https://cloud.google.com/iam/docs/full-resource-names) ++ // * **Azure:** The [Fully Qualified Resource ++ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // of the invoked function, ++ // *not* the function app, having the form ++ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider. ++ FaaSIDKey = attribute.Key("faas.id") ++ ++ // FaaSVersionKey is the attribute Key conforming to the "faas.version" ++ // semantic conventions. It represents the immutable version of the ++ // function being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '26', 'pinkfroid-00002' ++ // Note: Depending on the cloud provider and platform, use: ++ // ++ // * **AWS Lambda:** The [function ++ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) ++ // (an integer represented as a decimal string). ++ // * **Google Cloud Run:** The ++ // [revision](https://cloud.google.com/run/docs/managing/revisions) ++ // (i.e., the function name plus the revision suffix). ++ // * **Google Cloud Functions:** The value of the ++ // [`K_REVISION` environment ++ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). ++ // * **Azure Functions:** Not applicable. Do not set this attribute. ++ FaaSVersionKey = attribute.Key("faas.version") ++ ++ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" ++ // semantic conventions. It represents the execution environment ID as a ++ // string, that will be potentially reused for other invocations to the ++ // same function/function version. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' ++ // Note: * **AWS Lambda:** Use the (full) log stream name. ++ FaaSInstanceKey = attribute.Key("faas.instance") ++ ++ // FaaSMaxMemoryKey is the attribute Key conforming to the ++ // "faas.max_memory" semantic conventions. It represents the amount of ++ // memory available to the serverless function in MiB. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 128 ++ // Note: It's recommended to set this attribute since e.g. too little ++ // memory can easily stop a Java AWS Lambda function from working ++ // correctly. On AWS Lambda, the environment variable ++ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. ++ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ++) ++ ++// FaaSName returns an attribute KeyValue conforming to the "faas.name" ++// semantic conventions. It represents the name of the single function that ++// this runtime instance executes. 
++func FaaSName(val string) attribute.KeyValue { ++ return FaaSNameKey.String(val) ++} ++ ++// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic ++// conventions. It represents the unique ID of the single function that this ++// runtime instance executes. ++func FaaSID(val string) attribute.KeyValue { ++ return FaaSIDKey.String(val) ++} ++ ++// FaaSVersion returns an attribute KeyValue conforming to the ++// "faas.version" semantic conventions. It represents the immutable version of ++// the function being executed. ++func FaaSVersion(val string) attribute.KeyValue { ++ return FaaSVersionKey.String(val) ++} ++ ++// FaaSInstance returns an attribute KeyValue conforming to the ++// "faas.instance" semantic conventions. It represents the execution ++// environment ID as a string, that will be potentially reused for other ++// invocations to the same function/function version. ++func FaaSInstance(val string) attribute.KeyValue { ++ return FaaSInstanceKey.String(val) ++} ++ ++// FaaSMaxMemory returns an attribute KeyValue conforming to the ++// "faas.max_memory" semantic conventions. It represents the amount of memory ++// available to the serverless function in MiB. ++func FaaSMaxMemory(val int) attribute.KeyValue { ++ return FaaSMaxMemoryKey.Int(val) ++} ++ ++// A host is defined as a general computing instance. ++const ( ++ // HostIDKey is the attribute Key conforming to the "host.id" semantic ++ // conventions. It represents the unique host ID. For Cloud, this must be ++ // the instance_id assigned by the cloud provider. For non-containerized ++ // Linux systems, the `machine-id` located in `/etc/machine-id` or ++ // `/var/lib/dbus/machine-id` may be used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'fdbf79e8af94cb7f9e8df36789187052' ++ HostIDKey = attribute.Key("host.id") ++ ++ // HostNameKey is the attribute Key conforming to the "host.name" semantic ++ // conventions. It represents the name of the host. On Unix systems, it may ++ // contain what the hostname command returns, or the fully qualified ++ // hostname, or another name specified by the user. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-test' ++ HostNameKey = attribute.Key("host.name") ++ ++ // HostTypeKey is the attribute Key conforming to the "host.type" semantic ++ // conventions. It represents the type of host. For Cloud, this must be the ++ // machine type. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'n1-standard-1' ++ HostTypeKey = attribute.Key("host.type") ++ ++ // HostArchKey is the attribute Key conforming to the "host.arch" semantic ++ // conventions. It represents the CPU architecture the host system is ++ // running on. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ HostArchKey = attribute.Key("host.arch") ++ ++ // HostImageNameKey is the attribute Key conforming to the ++ // "host.image.name" semantic conventions. It represents the name of the VM ++ // image or OS install the host was instantiated from. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' ++ HostImageNameKey = attribute.Key("host.image.name") ++ ++ // HostImageIDKey is the attribute Key conforming to the "host.image.id" ++ // semantic conventions. It represents the vM image ID. 
For Cloud, this ++ // value is from the provider. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ami-07b06b442921831e5' ++ HostImageIDKey = attribute.Key("host.image.id") ++ ++ // HostImageVersionKey is the attribute Key conforming to the ++ // "host.image.version" semantic conventions. It represents the version ++ // string of the VM image as defined in [Version ++ // Attributes](README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ HostImageVersionKey = attribute.Key("host.image.version") ++) ++ ++var ( ++ // AMD64 ++ HostArchAMD64 = HostArchKey.String("amd64") ++ // ARM32 ++ HostArchARM32 = HostArchKey.String("arm32") ++ // ARM64 ++ HostArchARM64 = HostArchKey.String("arm64") ++ // Itanium ++ HostArchIA64 = HostArchKey.String("ia64") ++ // 32-bit PowerPC ++ HostArchPPC32 = HostArchKey.String("ppc32") ++ // 64-bit PowerPC ++ HostArchPPC64 = HostArchKey.String("ppc64") ++ // IBM z/Architecture ++ HostArchS390x = HostArchKey.String("s390x") ++ // 32-bit x86 ++ HostArchX86 = HostArchKey.String("x86") ++) ++ ++// HostID returns an attribute KeyValue conforming to the "host.id" semantic ++// conventions. It represents the unique host ID. For Cloud, this must be the ++// instance_id assigned by the cloud provider. For non-containerized Linux ++// systems, the `machine-id` located in `/etc/machine-id` or ++// `/var/lib/dbus/machine-id` may be used. ++func HostID(val string) attribute.KeyValue { ++ return HostIDKey.String(val) ++} ++ ++// HostName returns an attribute KeyValue conforming to the "host.name" ++// semantic conventions. It represents the name of the host. On Unix systems, ++// it may contain what the hostname command returns, or the fully qualified ++// hostname, or another name specified by the user. ++func HostName(val string) attribute.KeyValue { ++ return HostNameKey.String(val) ++} ++ ++// HostType returns an attribute KeyValue conforming to the "host.type" ++// semantic conventions. It represents the type of host. For Cloud, this must ++// be the machine type. ++func HostType(val string) attribute.KeyValue { ++ return HostTypeKey.String(val) ++} ++ ++// HostImageName returns an attribute KeyValue conforming to the ++// "host.image.name" semantic conventions. It represents the name of the VM ++// image or OS install the host was instantiated from. ++func HostImageName(val string) attribute.KeyValue { ++ return HostImageNameKey.String(val) ++} ++ ++// HostImageID returns an attribute KeyValue conforming to the ++// "host.image.id" semantic conventions. It represents the vM image ID. For ++// Cloud, this value is from the provider. ++func HostImageID(val string) attribute.KeyValue { ++ return HostImageIDKey.String(val) ++} ++ ++// HostImageVersion returns an attribute KeyValue conforming to the ++// "host.image.version" semantic conventions. It represents the version string ++// of the VM image as defined in [Version ++// Attributes](README.md#version-attributes). ++func HostImageVersion(val string) attribute.KeyValue { ++ return HostImageVersionKey.String(val) ++} ++ ++// A Kubernetes Cluster. ++const ( ++ // K8SClusterNameKey is the attribute Key conforming to the ++ // "k8s.cluster.name" semantic conventions. It represents the name of the ++ // cluster. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-cluster' ++ K8SClusterNameKey = attribute.Key("k8s.cluster.name") ++) ++ ++// K8SClusterName returns an attribute KeyValue conforming to the ++// "k8s.cluster.name" semantic conventions. It represents the name of the ++// cluster. ++func K8SClusterName(val string) attribute.KeyValue { ++ return K8SClusterNameKey.String(val) ++} ++ ++// A Kubernetes Node object. ++const ( ++ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" ++ // semantic conventions. It represents the name of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'node-1' ++ K8SNodeNameKey = attribute.Key("k8s.node.name") ++ ++ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" ++ // semantic conventions. It represents the UID of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' ++ K8SNodeUIDKey = attribute.Key("k8s.node.uid") ++) ++ ++// K8SNodeName returns an attribute KeyValue conforming to the ++// "k8s.node.name" semantic conventions. It represents the name of the Node. ++func K8SNodeName(val string) attribute.KeyValue { ++ return K8SNodeNameKey.String(val) ++} ++ ++// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" ++// semantic conventions. It represents the UID of the Node. ++func K8SNodeUID(val string) attribute.KeyValue { ++ return K8SNodeUIDKey.String(val) ++} ++ ++// A Kubernetes Namespace. ++const ( ++ // K8SNamespaceNameKey is the attribute Key conforming to the ++ // "k8s.namespace.name" semantic conventions. It represents the name of the ++ // namespace that the pod is running in. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'default' ++ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ++) ++ ++// K8SNamespaceName returns an attribute KeyValue conforming to the ++// "k8s.namespace.name" semantic conventions. It represents the name of the ++// namespace that the pod is running in. ++func K8SNamespaceName(val string) attribute.KeyValue { ++ return K8SNamespaceNameKey.String(val) ++} ++ ++// A Kubernetes Pod object. ++const ( ++ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" ++ // semantic conventions. It represents the UID of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SPodUIDKey = attribute.Key("k8s.pod.uid") ++ ++ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" ++ // semantic conventions. It represents the name of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-pod-autoconf' ++ K8SPodNameKey = attribute.Key("k8s.pod.name") ++) ++ ++// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" ++// semantic conventions. It represents the UID of the Pod. ++func K8SPodUID(val string) attribute.KeyValue { ++ return K8SPodUIDKey.String(val) ++} ++ ++// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" ++// semantic conventions. It represents the name of the Pod. ++func K8SPodName(val string) attribute.KeyValue { ++ return K8SPodNameKey.String(val) ++} ++ ++// A container in a ++// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). 
++const ( ++ // K8SContainerNameKey is the attribute Key conforming to the ++ // "k8s.container.name" semantic conventions. It represents the name of the ++ // Container from Pod specification, must be unique within a Pod. Container ++ // runtime usually uses different globally unique name (`container.name`). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'redis' ++ K8SContainerNameKey = attribute.Key("k8s.container.name") ++ ++ // K8SContainerRestartCountKey is the attribute Key conforming to the ++ // "k8s.container.restart_count" semantic conventions. It represents the ++ // number of times the container was restarted. This attribute can be used ++ // to identify a particular container (running or stopped) within a ++ // container spec. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ++) ++ ++// K8SContainerName returns an attribute KeyValue conforming to the ++// "k8s.container.name" semantic conventions. It represents the name of the ++// Container from Pod specification, must be unique within a Pod. Container ++// runtime usually uses different globally unique name (`container.name`). ++func K8SContainerName(val string) attribute.KeyValue { ++ return K8SContainerNameKey.String(val) ++} ++ ++// K8SContainerRestartCount returns an attribute KeyValue conforming to the ++// "k8s.container.restart_count" semantic conventions. It represents the number ++// of times the container was restarted. This attribute can be used to identify ++// a particular container (running or stopped) within a container spec. ++func K8SContainerRestartCount(val int) attribute.KeyValue { ++ return K8SContainerRestartCountKey.Int(val) ++} ++ ++// A Kubernetes ReplicaSet object. ++const ( ++ // K8SReplicaSetUIDKey is the attribute Key conforming to the ++ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++ // ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") ++ ++ // K8SReplicaSetNameKey is the attribute Key conforming to the ++ // "k8s.replicaset.name" semantic conventions. It represents the name of ++ // the ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ++) ++ ++// K8SReplicaSetUID returns an attribute KeyValue conforming to the ++// "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++// ReplicaSet. ++func K8SReplicaSetUID(val string) attribute.KeyValue { ++ return K8SReplicaSetUIDKey.String(val) ++} ++ ++// K8SReplicaSetName returns an attribute KeyValue conforming to the ++// "k8s.replicaset.name" semantic conventions. It represents the name of the ++// ReplicaSet. ++func K8SReplicaSetName(val string) attribute.KeyValue { ++ return K8SReplicaSetNameKey.String(val) ++} ++ ++// A Kubernetes Deployment object. ++const ( ++ // K8SDeploymentUIDKey is the attribute Key conforming to the ++ // "k8s.deployment.uid" semantic conventions. It represents the UID of the ++ // Deployment. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") ++ ++ // K8SDeploymentNameKey is the attribute Key conforming to the ++ // "k8s.deployment.name" semantic conventions. It represents the name of ++ // the Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ++) ++ ++// K8SDeploymentUID returns an attribute KeyValue conforming to the ++// "k8s.deployment.uid" semantic conventions. It represents the UID of the ++// Deployment. ++func K8SDeploymentUID(val string) attribute.KeyValue { ++ return K8SDeploymentUIDKey.String(val) ++} ++ ++// K8SDeploymentName returns an attribute KeyValue conforming to the ++// "k8s.deployment.name" semantic conventions. It represents the name of the ++// Deployment. ++func K8SDeploymentName(val string) attribute.KeyValue { ++ return K8SDeploymentNameKey.String(val) ++} ++ ++// A Kubernetes StatefulSet object. ++const ( ++ // K8SStatefulSetUIDKey is the attribute Key conforming to the ++ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++ // StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") ++ ++ // K8SStatefulSetNameKey is the attribute Key conforming to the ++ // "k8s.statefulset.name" semantic conventions. It represents the name of ++ // the StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ++) ++ ++// K8SStatefulSetUID returns an attribute KeyValue conforming to the ++// "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++// StatefulSet. ++func K8SStatefulSetUID(val string) attribute.KeyValue { ++ return K8SStatefulSetUIDKey.String(val) ++} ++ ++// K8SStatefulSetName returns an attribute KeyValue conforming to the ++// "k8s.statefulset.name" semantic conventions. It represents the name of the ++// StatefulSet. ++func K8SStatefulSetName(val string) attribute.KeyValue { ++ return K8SStatefulSetNameKey.String(val) ++} ++ ++// A Kubernetes DaemonSet object. ++const ( ++ // K8SDaemonSetUIDKey is the attribute Key conforming to the ++ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") ++ ++ // K8SDaemonSetNameKey is the attribute Key conforming to the ++ // "k8s.daemonset.name" semantic conventions. It represents the name of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ++) ++ ++// K8SDaemonSetUID returns an attribute KeyValue conforming to the ++// "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++// DaemonSet. ++func K8SDaemonSetUID(val string) attribute.KeyValue { ++ return K8SDaemonSetUIDKey.String(val) ++} ++ ++// K8SDaemonSetName returns an attribute KeyValue conforming to the ++// "k8s.daemonset.name" semantic conventions. 
It represents the name of the ++// DaemonSet. ++func K8SDaemonSetName(val string) attribute.KeyValue { ++ return K8SDaemonSetNameKey.String(val) ++} ++ ++// A Kubernetes Job object. ++const ( ++ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" ++ // semantic conventions. It represents the UID of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SJobUIDKey = attribute.Key("k8s.job.uid") ++ ++ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" ++ // semantic conventions. It represents the name of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SJobNameKey = attribute.Key("k8s.job.name") ++) ++ ++// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" ++// semantic conventions. It represents the UID of the Job. ++func K8SJobUID(val string) attribute.KeyValue { ++ return K8SJobUIDKey.String(val) ++} ++ ++// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" ++// semantic conventions. It represents the name of the Job. ++func K8SJobName(val string) attribute.KeyValue { ++ return K8SJobNameKey.String(val) ++} ++ ++// A Kubernetes CronJob object. ++const ( ++ // K8SCronJobUIDKey is the attribute Key conforming to the ++ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") ++ ++ // K8SCronJobNameKey is the attribute Key conforming to the ++ // "k8s.cronjob.name" semantic conventions. It represents the name of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ++) ++ ++// K8SCronJobUID returns an attribute KeyValue conforming to the ++// "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++// CronJob. ++func K8SCronJobUID(val string) attribute.KeyValue { ++ return K8SCronJobUIDKey.String(val) ++} ++ ++// K8SCronJobName returns an attribute KeyValue conforming to the ++// "k8s.cronjob.name" semantic conventions. It represents the name of the ++// CronJob. ++func K8SCronJobName(val string) attribute.KeyValue { ++ return K8SCronJobNameKey.String(val) ++} ++ ++// The operating system (OS) on which the process represented by this resource ++// is running. ++const ( ++ // OSTypeKey is the attribute Key conforming to the "os.type" semantic ++ // conventions. It represents the operating system type. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ OSTypeKey = attribute.Key("os.type") ++ ++ // OSDescriptionKey is the attribute Key conforming to the "os.description" ++ // semantic conventions. It represents the human readable (not intended to ++ // be parsed) OS version information, like e.g. reported by `ver` or ++ // `lsb_release -a` commands. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 ++ // LTS' ++ OSDescriptionKey = attribute.Key("os.description") ++ ++ // OSNameKey is the attribute Key conforming to the "os.name" semantic ++ // conventions. It represents the human readable operating system name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iOS', 'Android', 'Ubuntu' ++ OSNameKey = attribute.Key("os.name") ++ ++ // OSVersionKey is the attribute Key conforming to the "os.version" ++ // semantic conventions. It represents the version string of the operating ++ // system as defined in [Version ++ // Attributes](../../resource/semantic_conventions/README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.2.1', '18.04.1' ++ OSVersionKey = attribute.Key("os.version") ++) ++ ++var ( ++ // Microsoft Windows ++ OSTypeWindows = OSTypeKey.String("windows") ++ // Linux ++ OSTypeLinux = OSTypeKey.String("linux") ++ // Apple Darwin ++ OSTypeDarwin = OSTypeKey.String("darwin") ++ // FreeBSD ++ OSTypeFreeBSD = OSTypeKey.String("freebsd") ++ // NetBSD ++ OSTypeNetBSD = OSTypeKey.String("netbsd") ++ // OpenBSD ++ OSTypeOpenBSD = OSTypeKey.String("openbsd") ++ // DragonFly BSD ++ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") ++ // HP-UX (Hewlett Packard Unix) ++ OSTypeHPUX = OSTypeKey.String("hpux") ++ // AIX (Advanced Interactive eXecutive) ++ OSTypeAIX = OSTypeKey.String("aix") ++ // SunOS, Oracle Solaris ++ OSTypeSolaris = OSTypeKey.String("solaris") ++ // IBM z/OS ++ OSTypeZOS = OSTypeKey.String("z_os") ++) ++ ++// OSDescription returns an attribute KeyValue conforming to the ++// "os.description" semantic conventions. It represents the human readable (not ++// intended to be parsed) OS version information, like e.g. reported by `ver` ++// or `lsb_release -a` commands. ++func OSDescription(val string) attribute.KeyValue { ++ return OSDescriptionKey.String(val) ++} ++ ++// OSName returns an attribute KeyValue conforming to the "os.name" semantic ++// conventions. It represents the human readable operating system name. ++func OSName(val string) attribute.KeyValue { ++ return OSNameKey.String(val) ++} ++ ++// OSVersion returns an attribute KeyValue conforming to the "os.version" ++// semantic conventions. It represents the version string of the operating ++// system as defined in [Version ++// Attributes](../../resource/semantic_conventions/README.md#version-attributes). ++func OSVersion(val string) attribute.KeyValue { ++ return OSVersionKey.String(val) ++} ++ ++// An operating system process. ++const ( ++ // ProcessPIDKey is the attribute Key conforming to the "process.pid" ++ // semantic conventions. It represents the process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1234 ++ ProcessPIDKey = attribute.Key("process.pid") ++ ++ // ProcessParentPIDKey is the attribute Key conforming to the ++ // "process.parent_pid" semantic conventions. It represents the parent ++ // Process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 111 ++ ProcessParentPIDKey = attribute.Key("process.parent_pid") ++ ++ // ProcessExecutableNameKey is the attribute Key conforming to the ++ // "process.executable.name" semantic conventions. It represents the name ++ // of the process executable. On Linux based systems, can be set to the ++ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name ++ // of `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) 
++ // Stability: stable ++ // Examples: 'otelcol' ++ ProcessExecutableNameKey = attribute.Key("process.executable.name") ++ ++ // ProcessExecutablePathKey is the attribute Key conforming to the ++ // "process.executable.path" semantic conventions. It represents the full ++ // path to the process executable. On Linux based systems, can be set to ++ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of ++ // `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: '/usr/bin/cmd/otelcol' ++ ProcessExecutablePathKey = attribute.Key("process.executable.path") ++ ++ // ProcessCommandKey is the attribute Key conforming to the ++ // "process.command" semantic conventions. It represents the command used ++ // to launch the process (i.e. the command name). On Linux based systems, ++ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can ++ // be set to the first parameter extracted from `GetCommandLineW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otelcol' ++ ProcessCommandKey = attribute.Key("process.command") ++ ++ // ProcessCommandLineKey is the attribute Key conforming to the ++ // "process.command_line" semantic conventions. It represents the full ++ // command used to launch the process as a single string representing the ++ // full command. On Windows, can be set to the result of `GetCommandLineW`. ++ // Do not set this if you have to assemble it just for monitoring; use ++ // `process.command_args` instead. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ++ ProcessCommandLineKey = attribute.Key("process.command_line") ++ ++ // ProcessCommandArgsKey is the attribute Key conforming to the ++ // "process.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) as received ++ // by the process. On Linux-based systems (and some other Unixoid systems ++ // supporting procfs), can be set according to the list of null-delimited ++ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++ // this would be the full argv vector passed to `main`. ++ // ++ // Type: string[] ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otecol', '--config=config.yaml' ++ ProcessCommandArgsKey = attribute.Key("process.command_args") ++ ++ // ProcessOwnerKey is the attribute Key conforming to the "process.owner" ++ // semantic conventions. It represents the username of the user that owns ++ // the process. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'root' ++ ProcessOwnerKey = attribute.Key("process.owner") ++) ++ ++// ProcessPID returns an attribute KeyValue conforming to the "process.pid" ++// semantic conventions. It represents the process identifier (PID). ++func ProcessPID(val int) attribute.KeyValue { ++ return ProcessPIDKey.Int(val) ++} ++ ++// ProcessParentPID returns an attribute KeyValue conforming to the ++// "process.parent_pid" semantic conventions. It represents the parent Process ++// identifier (PID). 
++func ProcessParentPID(val int) attribute.KeyValue { ++ return ProcessParentPIDKey.Int(val) ++} ++ ++// ProcessExecutableName returns an attribute KeyValue conforming to the ++// "process.executable.name" semantic conventions. It represents the name of ++// the process executable. On Linux based systems, can be set to the `Name` in ++// `proc/[pid]/status`. On Windows, can be set to the base name of ++// `GetProcessImageFileNameW`. ++func ProcessExecutableName(val string) attribute.KeyValue { ++ return ProcessExecutableNameKey.String(val) ++} ++ ++// ProcessExecutablePath returns an attribute KeyValue conforming to the ++// "process.executable.path" semantic conventions. It represents the full path ++// to the process executable. On Linux based systems, can be set to the target ++// of `proc/[pid]/exe`. On Windows, can be set to the result of ++// `GetProcessImageFileNameW`. ++func ProcessExecutablePath(val string) attribute.KeyValue { ++ return ProcessExecutablePathKey.String(val) ++} ++ ++// ProcessCommand returns an attribute KeyValue conforming to the ++// "process.command" semantic conventions. It represents the command used to ++// launch the process (i.e. the command name). On Linux based systems, can be ++// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to ++// the first parameter extracted from `GetCommandLineW`. ++func ProcessCommand(val string) attribute.KeyValue { ++ return ProcessCommandKey.String(val) ++} ++ ++// ProcessCommandLine returns an attribute KeyValue conforming to the ++// "process.command_line" semantic conventions. It represents the full command ++// used to launch the process as a single string representing the full command. ++// On Windows, can be set to the result of `GetCommandLineW`. Do not set this ++// if you have to assemble it just for monitoring; use `process.command_args` ++// instead. ++func ProcessCommandLine(val string) attribute.KeyValue { ++ return ProcessCommandLineKey.String(val) ++} ++ ++// ProcessCommandArgs returns an attribute KeyValue conforming to the ++// "process.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) as received by ++// the process. On Linux-based systems (and some other Unixoid systems ++// supporting procfs), can be set according to the list of null-delimited ++// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++// this would be the full argv vector passed to `main`. ++func ProcessCommandArgs(val ...string) attribute.KeyValue { ++ return ProcessCommandArgsKey.StringSlice(val) ++} ++ ++// ProcessOwner returns an attribute KeyValue conforming to the ++// "process.owner" semantic conventions. It represents the username of the user ++// that owns the process. ++func ProcessOwner(val string) attribute.KeyValue { ++ return ProcessOwnerKey.String(val) ++} ++ ++// The single (language) runtime instance which is monitored. ++const ( ++ // ProcessRuntimeNameKey is the attribute Key conforming to the ++ // "process.runtime.name" semantic conventions. It represents the name of ++ // the runtime of this process. For compiled native binaries, this SHOULD ++ // be the name of the compiler. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'OpenJDK Runtime Environment' ++ ProcessRuntimeNameKey = attribute.Key("process.runtime.name") ++ ++ // ProcessRuntimeVersionKey is the attribute Key conforming to the ++ // "process.runtime.version" semantic conventions. 
It represents the ++ // version of the runtime of this process, as returned by the runtime ++ // without modification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.0.2' ++ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") ++ ++ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the ++ // "process.runtime.description" semantic conventions. It represents an ++ // additional description about the runtime of the process, for example a ++ // specific vendor customization of the runtime environment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ++ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ++) ++ ++// ProcessRuntimeName returns an attribute KeyValue conforming to the ++// "process.runtime.name" semantic conventions. It represents the name of the ++// runtime of this process. For compiled native binaries, this SHOULD be the ++// name of the compiler. ++func ProcessRuntimeName(val string) attribute.KeyValue { ++ return ProcessRuntimeNameKey.String(val) ++} ++ ++// ProcessRuntimeVersion returns an attribute KeyValue conforming to the ++// "process.runtime.version" semantic conventions. It represents the version of ++// the runtime of this process, as returned by the runtime without ++// modification. ++func ProcessRuntimeVersion(val string) attribute.KeyValue { ++ return ProcessRuntimeVersionKey.String(val) ++} ++ ++// ProcessRuntimeDescription returns an attribute KeyValue conforming to the ++// "process.runtime.description" semantic conventions. It represents an ++// additional description about the runtime of the process, for example a ++// specific vendor customization of the runtime environment. ++func ProcessRuntimeDescription(val string) attribute.KeyValue { ++ return ProcessRuntimeDescriptionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNameKey is the attribute Key conforming to the "service.name" ++ // semantic conventions. It represents the logical name of the service. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'shoppingcart' ++ // Note: MUST be the same for all instances of horizontally scaled ++ // services. If the value was not specified, SDKs MUST fallback to ++ // `unknown_service:` concatenated with ++ // [`process.executable.name`](process.md#process), e.g. ++ // `unknown_service:bash`. If `process.executable.name` is not available, ++ // the value MUST be set to `unknown_service`. ++ ServiceNameKey = attribute.Key("service.name") ++ ++ // ServiceNamespaceKey is the attribute Key conforming to the ++ // "service.namespace" semantic conventions. It represents a namespace for ++ // `service.name`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Shop' ++ // Note: A string value having a meaning that helps to distinguish a group ++ // of services, for example the team name that owns a group of services. ++ // `service.name` is expected to be unique within the same namespace. If ++ // `service.namespace` is not specified in the Resource then `service.name` ++ // is expected to be unique for all services that have no explicit ++ // namespace defined (so the empty/unspecified namespace is simply one more ++ // valid namespace). Zero-length namespace string is assumed equal to ++ // unspecified namespace. 
++ ServiceNamespaceKey = attribute.Key("service.namespace") ++ ++ // ServiceInstanceIDKey is the attribute Key conforming to the ++ // "service.instance.id" semantic conventions. It represents the string ID ++ // of the service instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '627cc493-f310-47de-96bd-71410b7dec09' ++ // Note: MUST be unique for each instance of the same ++ // `service.namespace,service.name` pair (in other words ++ // `service.namespace,service.name,service.instance.id` triplet MUST be ++ // globally unique). The ID helps to distinguish instances of the same ++ // service that exist at the same time (e.g. instances of a horizontally ++ // scaled service). It is preferable for the ID to be persistent and stay ++ // the same for the lifetime of the service instance, however it is ++ // acceptable that the ID is ephemeral and changes during important ++ // lifetime events for the service (e.g. service restarts). If the service ++ // has no inherent unique ID that can be used as the value of this ++ // attribute it is recommended to generate a random Version 1 or Version 4 ++ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use ++ // Version 5, see RFC 4122 for more recommendations). ++ ServiceInstanceIDKey = attribute.Key("service.instance.id") ++ ++ // ServiceVersionKey is the attribute Key conforming to the ++ // "service.version" semantic conventions. It represents the version string ++ // of the service API or implementation. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2.0.0' ++ ServiceVersionKey = attribute.Key("service.version") ++) ++ ++// ServiceName returns an attribute KeyValue conforming to the ++// "service.name" semantic conventions. It represents the logical name of the ++// service. ++func ServiceName(val string) attribute.KeyValue { ++ return ServiceNameKey.String(val) ++} ++ ++// ServiceNamespace returns an attribute KeyValue conforming to the ++// "service.namespace" semantic conventions. It represents a namespace for ++// `service.name`. ++func ServiceNamespace(val string) attribute.KeyValue { ++ return ServiceNamespaceKey.String(val) ++} ++ ++// ServiceInstanceID returns an attribute KeyValue conforming to the ++// "service.instance.id" semantic conventions. It represents the string ID of ++// the service instance. ++func ServiceInstanceID(val string) attribute.KeyValue { ++ return ServiceInstanceIDKey.String(val) ++} ++ ++// ServiceVersion returns an attribute KeyValue conforming to the ++// "service.version" semantic conventions. It represents the version string of ++// the service API or implementation. ++func ServiceVersion(val string) attribute.KeyValue { ++ return ServiceVersionKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetrySDKNameKey is the attribute Key conforming to the ++ // "telemetry.sdk.name" semantic conventions. It represents the name of the ++ // telemetry SDK as defined above. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") ++ ++ // TelemetrySDKLanguageKey is the attribute Key conforming to the ++ // "telemetry.sdk.language" semantic conventions. It represents the ++ // language of the telemetry SDK. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") ++ ++ // TelemetrySDKVersionKey is the attribute Key conforming to the ++ // "telemetry.sdk.version" semantic conventions. It represents the version ++ // string of the telemetry SDK. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ++ ++ // TelemetryAutoVersionKey is the attribute Key conforming to the ++ // "telemetry.auto.version" semantic conventions. It represents the version ++ // string of the auto instrumentation agent, if used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ++) ++ ++var ( ++ // cpp ++ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") ++ // dotnet ++ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") ++ // erlang ++ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") ++ // go ++ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") ++ // java ++ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") ++ // nodejs ++ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") ++ // php ++ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") ++ // python ++ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") ++ // ruby ++ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") ++ // webjs ++ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ++ // swift ++ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ++) ++ ++// TelemetrySDKName returns an attribute KeyValue conforming to the ++// "telemetry.sdk.name" semantic conventions. It represents the name of the ++// telemetry SDK as defined above. ++func TelemetrySDKName(val string) attribute.KeyValue { ++ return TelemetrySDKNameKey.String(val) ++} ++ ++// TelemetrySDKVersion returns an attribute KeyValue conforming to the ++// "telemetry.sdk.version" semantic conventions. It represents the version ++// string of the telemetry SDK. ++func TelemetrySDKVersion(val string) attribute.KeyValue { ++ return TelemetrySDKVersionKey.String(val) ++} ++ ++// TelemetryAutoVersion returns an attribute KeyValue conforming to the ++// "telemetry.auto.version" semantic conventions. It represents the version ++// string of the auto instrumentation agent, if used. ++func TelemetryAutoVersion(val string) attribute.KeyValue { ++ return TelemetryAutoVersionKey.String(val) ++} ++ ++// Resource describing the packaged software running the application code. Web ++// engines are typically executed using process.runtime. ++const ( ++ // WebEngineNameKey is the attribute Key conforming to the "webengine.name" ++ // semantic conventions. It represents the name of the web engine. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'WildFly' ++ WebEngineNameKey = attribute.Key("webengine.name") ++ ++ // WebEngineVersionKey is the attribute Key conforming to the ++ // "webengine.version" semantic conventions. It represents the version of ++ // the web engine. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '21.0.0' ++ WebEngineVersionKey = attribute.Key("webengine.version") ++ ++ // WebEngineDescriptionKey is the attribute Key conforming to the ++ // "webengine.description" semantic conventions. It represents the ++ // additional description of the web engine (e.g. detailed version and ++ // edition information). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - ++ // 2.2.2.Final' ++ WebEngineDescriptionKey = attribute.Key("webengine.description") ++) ++ ++// WebEngineName returns an attribute KeyValue conforming to the ++// "webengine.name" semantic conventions. It represents the name of the web ++// engine. ++func WebEngineName(val string) attribute.KeyValue { ++ return WebEngineNameKey.String(val) ++} ++ ++// WebEngineVersion returns an attribute KeyValue conforming to the ++// "webengine.version" semantic conventions. It represents the version of the ++// web engine. ++func WebEngineVersion(val string) attribute.KeyValue { ++ return WebEngineVersionKey.String(val) ++} ++ ++// WebEngineDescription returns an attribute KeyValue conforming to the ++// "webengine.description" semantic conventions. It represents the additional ++// description of the web engine (e.g. detailed version and edition ++// information). ++func WebEngineDescription(val string) attribute.KeyValue { ++ return WebEngineDescriptionKey.String(val) ++} ++ ++// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's ++// concepts. ++const ( ++ // OtelScopeNameKey is the attribute Key conforming to the ++ // "otel.scope.name" semantic conventions. It represents the name of the ++ // instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OtelScopeNameKey = attribute.Key("otel.scope.name") ++ ++ // OtelScopeVersionKey is the attribute Key conforming to the ++ // "otel.scope.version" semantic conventions. It represents the version of ++ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0.0' ++ OtelScopeVersionKey = attribute.Key("otel.scope.version") ++) ++ ++// OtelScopeName returns an attribute KeyValue conforming to the ++// "otel.scope.name" semantic conventions. It represents the name of the ++// instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++func OtelScopeName(val string) attribute.KeyValue { ++ return OtelScopeNameKey.String(val) ++} ++ ++// OtelScopeVersion returns an attribute KeyValue conforming to the ++// "otel.scope.version" semantic conventions. It represents the version of the ++// instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++func OtelScopeVersion(val string) attribute.KeyValue { ++ return OtelScopeVersionKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry ++// Scope's concepts. ++const ( ++ // OtelLibraryNameKey is the attribute Key conforming to the ++ // "otel.library.name" semantic conventions. It represents the deprecated, ++ // use the `otel.scope.name` attribute. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OtelLibraryNameKey = attribute.Key("otel.library.name") ++ ++ // OtelLibraryVersionKey is the attribute Key conforming to the ++ // "otel.library.version" semantic conventions. It represents the ++ // deprecated, use the `otel.scope.version` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '1.0.0' ++ OtelLibraryVersionKey = attribute.Key("otel.library.version") ++) ++ ++// OtelLibraryName returns an attribute KeyValue conforming to the ++// "otel.library.name" semantic conventions. It represents the deprecated, use ++// the `otel.scope.name` attribute. ++func OtelLibraryName(val string) attribute.KeyValue { ++ return OtelLibraryNameKey.String(val) ++} ++ ++// OtelLibraryVersion returns an attribute KeyValue conforming to the ++// "otel.library.version" semantic conventions. It represents the deprecated, ++// use the `otel.scope.version` attribute. ++func OtelLibraryVersion(val string) attribute.KeyValue { ++ return OtelLibraryVersionKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +new file mode 100644 +index 00000000000..42fc525d165 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++// SchemaURL is the schema URL that matches the version of the semantic conventions ++// that this package defines. Semconv packages starting from v1.4.0 must declare ++// non-empty schema URL in the form https://opentelemetry.io/schemas/ ++const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +new file mode 100644 +index 00000000000..8c4a7299d27 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +@@ -0,0 +1,3375 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. 
++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The shared attributes used to report a single exception associated with a ++// span or log. ++const ( ++ // ExceptionTypeKey is the attribute Key conforming to the "exception.type" ++ // semantic conventions. It represents the type of the exception (its ++ // fully-qualified class name, if applicable). The dynamic type of the ++ // exception should be preferred over the static type in languages that ++ // support it. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'java.net.ConnectException', 'OSError' ++ ExceptionTypeKey = attribute.Key("exception.type") ++ ++ // ExceptionMessageKey is the attribute Key conforming to the ++ // "exception.message" semantic conventions. It represents the exception ++ // message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Division by zero', "Can't convert 'int' object to str ++ // implicitly" ++ ExceptionMessageKey = attribute.Key("exception.message") ++ ++ // ExceptionStacktraceKey is the attribute Key conforming to the ++ // "exception.stacktrace" semantic conventions. It represents a stacktrace ++ // as a string in the natural representation for the language runtime. The ++ // representation is to be determined and documented by each language SIG. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test ++ // exception\\n at ' ++ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' ++ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' ++ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ++ ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ++) ++ ++// ExceptionType returns an attribute KeyValue conforming to the ++// "exception.type" semantic conventions. It represents the type of the ++// exception (its fully-qualified class name, if applicable). The dynamic type ++// of the exception should be preferred over the static type in languages that ++// support it. ++func ExceptionType(val string) attribute.KeyValue { ++ return ExceptionTypeKey.String(val) ++} ++ ++// ExceptionMessage returns an attribute KeyValue conforming to the ++// "exception.message" semantic conventions. It represents the exception ++// message. ++func ExceptionMessage(val string) attribute.KeyValue { ++ return ExceptionMessageKey.String(val) ++} ++ ++// ExceptionStacktrace returns an attribute KeyValue conforming to the ++// "exception.stacktrace" semantic conventions. It represents a stacktrace as a ++// string in the natural representation for the language runtime. The ++// representation is to be determined and documented by each language SIG. ++func ExceptionStacktrace(val string) attribute.KeyValue { ++ return ExceptionStacktraceKey.String(val) ++} ++ ++// Attributes for Events represented using Log Records. ++const ( ++ // EventNameKey is the attribute Key conforming to the "event.name" ++ // semantic conventions. It represents the name identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'click', 'exception' ++ EventNameKey = attribute.Key("event.name") ++ ++ // EventDomainKey is the attribute Key conforming to the "event.domain" ++ // semantic conventions. 
It represents the domain identifies the business ++ // context for the events. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: Events across different domains may have same `event.name`, yet be ++ // unrelated events. ++ EventDomainKey = attribute.Key("event.domain") ++) ++ ++var ( ++ // Events from browser apps ++ EventDomainBrowser = EventDomainKey.String("browser") ++ // Events from mobile apps ++ EventDomainDevice = EventDomainKey.String("device") ++ // Events from Kubernetes ++ EventDomainK8S = EventDomainKey.String("k8s") ++) ++ ++// EventName returns an attribute KeyValue conforming to the "event.name" ++// semantic conventions. It represents the name identifies the event. ++func EventName(val string) attribute.KeyValue { ++ return EventNameKey.String(val) ++} ++ ++// Span attributes used by AWS Lambda (in addition to general `faas` ++// attributes). ++const ( ++ // AWSLambdaInvokedARNKey is the attribute Key conforming to the ++ // "aws.lambda.invoked_arn" semantic conventions. It represents the full ++ // invoked ARN as provided on the `Context` passed to the function ++ // (`Lambda-Runtime-Invoked-Function-ARN` header on the ++ // `/runtime/invocation/next` applicable). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' ++ // Note: This may be different from `faas.id` if an alias is involved. ++ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ++) ++ ++// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the ++// "aws.lambda.invoked_arn" semantic conventions. It represents the full ++// invoked ARN as provided on the `Context` passed to the function ++// (`Lambda-Runtime-Invoked-Function-ARN` header on the ++// `/runtime/invocation/next` applicable). ++func AWSLambdaInvokedARN(val string) attribute.KeyValue { ++ return AWSLambdaInvokedARNKey.String(val) ++} ++ ++// Attributes for CloudEvents. CloudEvents is a specification on how to define ++// event data in a standard way. These attributes can be attached to spans when ++// performing operations with CloudEvents, regardless of the protocol being ++// used. ++const ( ++ // CloudeventsEventIDKey is the attribute Key conforming to the ++ // "cloudevents.event_id" semantic conventions. It represents the ++ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++ // uniquely identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' ++ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") ++ ++ // CloudeventsEventSourceKey is the attribute Key conforming to the ++ // "cloudevents.event_source" semantic conventions. It represents the ++ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++ // identifies the context in which an event happened. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://github.com/cloudevents', ++ // '/cloudevents/spec/pull/123', 'my-service' ++ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") ++ ++ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the ++ // "cloudevents.event_spec_version" semantic conventions. 
It represents the ++ // [version of the CloudEvents ++ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++ // which the event uses. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0' ++ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") ++ ++ // CloudeventsEventTypeKey is the attribute Key conforming to the ++ // "cloudevents.event_type" semantic conventions. It represents the ++ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++ // contains a value describing the type of event related to the originating ++ // occurrence. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.github.pull_request.opened', ++ // 'com.example.object.deleted.v2' ++ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") ++ ++ // CloudeventsEventSubjectKey is the attribute Key conforming to the ++ // "cloudevents.event_subject" semantic conventions. It represents the ++ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++ // of the event in the context of the event producer (identified by ++ // source). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mynewfile.jpg' ++ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ++) ++ ++// CloudeventsEventID returns an attribute KeyValue conforming to the ++// "cloudevents.event_id" semantic conventions. It represents the ++// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++// uniquely identifies the event. ++func CloudeventsEventID(val string) attribute.KeyValue { ++ return CloudeventsEventIDKey.String(val) ++} ++ ++// CloudeventsEventSource returns an attribute KeyValue conforming to the ++// "cloudevents.event_source" semantic conventions. It represents the ++// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++// identifies the context in which an event happened. ++func CloudeventsEventSource(val string) attribute.KeyValue { ++ return CloudeventsEventSourceKey.String(val) ++} ++ ++// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to ++// the "cloudevents.event_spec_version" semantic conventions. It represents the ++// [version of the CloudEvents ++// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++// which the event uses. ++func CloudeventsEventSpecVersion(val string) attribute.KeyValue { ++ return CloudeventsEventSpecVersionKey.String(val) ++} ++ ++// CloudeventsEventType returns an attribute KeyValue conforming to the ++// "cloudevents.event_type" semantic conventions. It represents the ++// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++// contains a value describing the type of event related to the originating ++// occurrence. ++func CloudeventsEventType(val string) attribute.KeyValue { ++ return CloudeventsEventTypeKey.String(val) ++} ++ ++// CloudeventsEventSubject returns an attribute KeyValue conforming to the ++// "cloudevents.event_subject" semantic conventions. It represents the ++// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++// of the event in the context of the event producer (identified by source). 
++func CloudeventsEventSubject(val string) attribute.KeyValue { ++ return CloudeventsEventSubjectKey.String(val) ++} ++ ++// Semantic conventions for the OpenTracing Shim ++const ( ++ // OpentracingRefTypeKey is the attribute Key conforming to the ++ // "opentracing.ref_type" semantic conventions. It represents the ++ // parent-child Reference type ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The causal relationship between a child Span and a parent Span. ++ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ++) ++ ++var ( ++ // The parent Span depends on the child Span in some capacity ++ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") ++ // The parent Span does not depend in any way on the result of the child Span ++ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ++) ++ ++// The attributes used to perform database client calls. ++const ( ++ // DBSystemKey is the attribute Key conforming to the "db.system" semantic ++ // conventions. It represents an identifier for the database management ++ // system (DBMS) product being used. See below for a list of well-known ++ // identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ DBSystemKey = attribute.Key("db.system") ++ ++ // DBConnectionStringKey is the attribute Key conforming to the ++ // "db.connection_string" semantic conventions. It represents the ++ // connection string used to connect to the database. It is recommended to ++ // remove embedded credentials. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' ++ DBConnectionStringKey = attribute.Key("db.connection_string") ++ ++ // DBUserKey is the attribute Key conforming to the "db.user" semantic ++ // conventions. It represents the username for accessing the database. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'readonly_user', 'reporting_user' ++ DBUserKey = attribute.Key("db.user") ++ ++ // DBJDBCDriverClassnameKey is the attribute Key conforming to the ++ // "db.jdbc.driver_classname" semantic conventions. It represents the ++ // fully-qualified class name of the [Java Database Connectivity ++ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) ++ // driver used to connect. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'org.postgresql.Driver', ++ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' ++ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") ++ ++ // DBNameKey is the attribute Key conforming to the "db.name" semantic ++ // conventions. It represents the this attribute is used to report the name ++ // of the database being accessed. For commands that switch the database, ++ // this should be set to the target database (even if the command fails). ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable.) ++ // Stability: stable ++ // Examples: 'customers', 'main' ++ // Note: In some SQL databases, the database name to be used is called ++ // "schema name". In case there are multiple layers that could be ++ // considered for database name (e.g. Oracle instance name and schema ++ // name), the database name to be used is the more specific layer (e.g. ++ // Oracle schema name). 
++ DBNameKey = attribute.Key("db.name") ++ ++ // DBStatementKey is the attribute Key conforming to the "db.statement" ++ // semantic conventions. It represents the database statement being ++ // executed. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable and not ++ // explicitly disabled via instrumentation configuration.) ++ // Stability: stable ++ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' ++ // Note: The value may be sanitized to exclude sensitive information. ++ DBStatementKey = attribute.Key("db.statement") ++ ++ // DBOperationKey is the attribute Key conforming to the "db.operation" ++ // semantic conventions. It represents the name of the operation being ++ // executed, e.g. the [MongoDB command ++ // name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++ // such as `findAndModify`, or the SQL keyword. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If `db.statement` is not ++ // applicable.) ++ // Stability: stable ++ // Examples: 'findAndModify', 'HMSET', 'SELECT' ++ // Note: When setting this to an SQL keyword, it is not recommended to ++ // attempt any client-side parsing of `db.statement` just to get this ++ // property, but it should be set if the operation name is provided by the ++ // library being instrumented. If the SQL statement has an ambiguous ++ // operation, or performs more than one operation, this value may be ++ // omitted. ++ DBOperationKey = attribute.Key("db.operation") ++) ++ ++var ( ++ // Some other SQL database. Fallback only. See notes ++ DBSystemOtherSQL = DBSystemKey.String("other_sql") ++ // Microsoft SQL Server ++ DBSystemMSSQL = DBSystemKey.String("mssql") ++ // MySQL ++ DBSystemMySQL = DBSystemKey.String("mysql") ++ // Oracle Database ++ DBSystemOracle = DBSystemKey.String("oracle") ++ // IBM DB2 ++ DBSystemDB2 = DBSystemKey.String("db2") ++ // PostgreSQL ++ DBSystemPostgreSQL = DBSystemKey.String("postgresql") ++ // Amazon Redshift ++ DBSystemRedshift = DBSystemKey.String("redshift") ++ // Apache Hive ++ DBSystemHive = DBSystemKey.String("hive") ++ // Cloudscape ++ DBSystemCloudscape = DBSystemKey.String("cloudscape") ++ // HyperSQL DataBase ++ DBSystemHSQLDB = DBSystemKey.String("hsqldb") ++ // Progress Database ++ DBSystemProgress = DBSystemKey.String("progress") ++ // SAP MaxDB ++ DBSystemMaxDB = DBSystemKey.String("maxdb") ++ // SAP HANA ++ DBSystemHanaDB = DBSystemKey.String("hanadb") ++ // Ingres ++ DBSystemIngres = DBSystemKey.String("ingres") ++ // FirstSQL ++ DBSystemFirstSQL = DBSystemKey.String("firstsql") ++ // EnterpriseDB ++ DBSystemEDB = DBSystemKey.String("edb") ++ // InterSystems Caché ++ DBSystemCache = DBSystemKey.String("cache") ++ // Adabas (Adaptable Database System) ++ DBSystemAdabas = DBSystemKey.String("adabas") ++ // Firebird ++ DBSystemFirebird = DBSystemKey.String("firebird") ++ // Apache Derby ++ DBSystemDerby = DBSystemKey.String("derby") ++ // FileMaker ++ DBSystemFilemaker = DBSystemKey.String("filemaker") ++ // Informix ++ DBSystemInformix = DBSystemKey.String("informix") ++ // InstantDB ++ DBSystemInstantDB = DBSystemKey.String("instantdb") ++ // InterBase ++ DBSystemInterbase = DBSystemKey.String("interbase") ++ // MariaDB ++ DBSystemMariaDB = DBSystemKey.String("mariadb") ++ // Netezza ++ DBSystemNetezza = DBSystemKey.String("netezza") ++ // Pervasive PSQL ++ DBSystemPervasive = DBSystemKey.String("pervasive") ++ // PointBase ++ DBSystemPointbase = DBSystemKey.String("pointbase") ++ // SQLite ++ DBSystemSqlite 
= DBSystemKey.String("sqlite") ++ // Sybase ++ DBSystemSybase = DBSystemKey.String("sybase") ++ // Teradata ++ DBSystemTeradata = DBSystemKey.String("teradata") ++ // Vertica ++ DBSystemVertica = DBSystemKey.String("vertica") ++ // H2 ++ DBSystemH2 = DBSystemKey.String("h2") ++ // ColdFusion IMQ ++ DBSystemColdfusion = DBSystemKey.String("coldfusion") ++ // Apache Cassandra ++ DBSystemCassandra = DBSystemKey.String("cassandra") ++ // Apache HBase ++ DBSystemHBase = DBSystemKey.String("hbase") ++ // MongoDB ++ DBSystemMongoDB = DBSystemKey.String("mongodb") ++ // Redis ++ DBSystemRedis = DBSystemKey.String("redis") ++ // Couchbase ++ DBSystemCouchbase = DBSystemKey.String("couchbase") ++ // CouchDB ++ DBSystemCouchDB = DBSystemKey.String("couchdb") ++ // Microsoft Azure Cosmos DB ++ DBSystemCosmosDB = DBSystemKey.String("cosmosdb") ++ // Amazon DynamoDB ++ DBSystemDynamoDB = DBSystemKey.String("dynamodb") ++ // Neo4j ++ DBSystemNeo4j = DBSystemKey.String("neo4j") ++ // Apache Geode ++ DBSystemGeode = DBSystemKey.String("geode") ++ // Elasticsearch ++ DBSystemElasticsearch = DBSystemKey.String("elasticsearch") ++ // Memcached ++ DBSystemMemcached = DBSystemKey.String("memcached") ++ // CockroachDB ++ DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ++ // OpenSearch ++ DBSystemOpensearch = DBSystemKey.String("opensearch") ++ // ClickHouse ++ DBSystemClickhouse = DBSystemKey.String("clickhouse") ++) ++ ++// DBConnectionString returns an attribute KeyValue conforming to the ++// "db.connection_string" semantic conventions. It represents the connection ++// string used to connect to the database. It is recommended to remove embedded ++// credentials. ++func DBConnectionString(val string) attribute.KeyValue { ++ return DBConnectionStringKey.String(val) ++} ++ ++// DBUser returns an attribute KeyValue conforming to the "db.user" semantic ++// conventions. It represents the username for accessing the database. ++func DBUser(val string) attribute.KeyValue { ++ return DBUserKey.String(val) ++} ++ ++// DBJDBCDriverClassname returns an attribute KeyValue conforming to the ++// "db.jdbc.driver_classname" semantic conventions. It represents the ++// fully-qualified class name of the [Java Database Connectivity ++// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver ++// used to connect. ++func DBJDBCDriverClassname(val string) attribute.KeyValue { ++ return DBJDBCDriverClassnameKey.String(val) ++} ++ ++// DBName returns an attribute KeyValue conforming to the "db.name" semantic ++// conventions. It represents the this attribute is used to report the name of ++// the database being accessed. For commands that switch the database, this ++// should be set to the target database (even if the command fails). ++func DBName(val string) attribute.KeyValue { ++ return DBNameKey.String(val) ++} ++ ++// DBStatement returns an attribute KeyValue conforming to the ++// "db.statement" semantic conventions. It represents the database statement ++// being executed. ++func DBStatement(val string) attribute.KeyValue { ++ return DBStatementKey.String(val) ++} ++ ++// DBOperation returns an attribute KeyValue conforming to the ++// "db.operation" semantic conventions. It represents the name of the operation ++// being executed, e.g. the [MongoDB command ++// name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++// such as `findAndModify`, or the SQL keyword. 
++func DBOperation(val string) attribute.KeyValue { ++ return DBOperationKey.String(val) ++} ++ ++// Connection-level attributes for Microsoft SQL Server ++const ( ++ // DBMSSQLInstanceNameKey is the attribute Key conforming to the ++ // "db.mssql.instance_name" semantic conventions. It represents the ++ // Microsoft SQL Server [instance ++ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++ // connecting to. This name is used to determine the port of a named ++ // instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MSSQLSERVER' ++ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no ++ // longer required (but still recommended if non-standard). ++ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ++) ++ ++// DBMSSQLInstanceName returns an attribute KeyValue conforming to the ++// "db.mssql.instance_name" semantic conventions. It represents the Microsoft ++// SQL Server [instance ++// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++// connecting to. This name is used to determine the port of a named instance. ++func DBMSSQLInstanceName(val string) attribute.KeyValue { ++ return DBMSSQLInstanceNameKey.String(val) ++} ++ ++// Call-level attributes for Cassandra ++const ( ++ // DBCassandraPageSizeKey is the attribute Key conforming to the ++ // "db.cassandra.page_size" semantic conventions. It represents the fetch ++ // size used for paging, i.e. how many rows will be returned at once. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 5000 ++ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") ++ ++ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the ++ // "db.cassandra.consistency_level" semantic conventions. It represents the ++ // consistency level of the query. Based on consistency values from ++ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") ++ ++ // DBCassandraTableKey is the attribute Key conforming to the ++ // "db.cassandra.table" semantic conventions. It represents the name of the ++ // primary table that the operation is acting upon, including the keyspace ++ // name (if applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'mytable' ++ // Note: This mirrors the db.sql.table attribute but references cassandra ++ // rather than sql. It is not recommended to attempt any client-side ++ // parsing of `db.statement` just to get this property, but it should be ++ // set if it is provided by the library being instrumented. If the ++ // operation is acting upon an anonymous table, or more than one table, ++ // this value MUST NOT be set. ++ DBCassandraTableKey = attribute.Key("db.cassandra.table") ++ ++ // DBCassandraIdempotenceKey is the attribute Key conforming to the ++ // "db.cassandra.idempotence" semantic conventions. It represents the ++ // whether or not the query is idempotent. 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") ++ ++ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming ++ // to the "db.cassandra.speculative_execution_count" semantic conventions. ++ // It represents the number of times a query was speculatively executed. ++ // Not set or `0` if the query was not executed speculatively. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") ++ ++ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID ++ // of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' ++ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") ++ ++ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.dc" semantic conventions. It represents the ++ // data center of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-west-2' ++ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ++) ++ ++var ( ++ // all ++ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") ++ // each_quorum ++ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") ++ // quorum ++ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") ++ // local_quorum ++ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") ++ // one ++ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") ++ // two ++ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") ++ // three ++ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") ++ // local_one ++ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") ++ // any ++ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") ++ // serial ++ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") ++ // local_serial ++ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ++) ++ ++// DBCassandraPageSize returns an attribute KeyValue conforming to the ++// "db.cassandra.page_size" semantic conventions. It represents the fetch size ++// used for paging, i.e. how many rows will be returned at once. ++func DBCassandraPageSize(val int) attribute.KeyValue { ++ return DBCassandraPageSizeKey.Int(val) ++} ++ ++// DBCassandraTable returns an attribute KeyValue conforming to the ++// "db.cassandra.table" semantic conventions. It represents the name of the ++// primary table that the operation is acting upon, including the keyspace name ++// (if applicable). ++func DBCassandraTable(val string) attribute.KeyValue { ++ return DBCassandraTableKey.String(val) ++} ++ ++// DBCassandraIdempotence returns an attribute KeyValue conforming to the ++// "db.cassandra.idempotence" semantic conventions. It represents the whether ++// or not the query is idempotent. 
++func DBCassandraIdempotence(val bool) attribute.KeyValue { ++ return DBCassandraIdempotenceKey.Bool(val) ++} ++ ++// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue ++// conforming to the "db.cassandra.speculative_execution_count" semantic ++// conventions. It represents the number of times a query was speculatively ++// executed. Not set or `0` if the query was not executed speculatively. ++func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { ++ return DBCassandraSpeculativeExecutionCountKey.Int(val) ++} ++ ++// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of ++// the coordinating node for a query. ++func DBCassandraCoordinatorID(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorIDKey.String(val) ++} ++ ++// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.dc" semantic conventions. It represents the data ++// center of the coordinating node for a query. ++func DBCassandraCoordinatorDC(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorDCKey.String(val) ++} ++ ++// Call-level attributes for Redis ++const ( ++ // DBRedisDBIndexKey is the attribute Key conforming to the ++ // "db.redis.database_index" semantic conventions. It represents the index ++ // of the database being accessed as used in the [`SELECT` ++ // command](https://redis.io/commands/select), provided as an integer. To ++ // be used instead of the generic `db.name` attribute. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // database (`0`).) ++ // Stability: stable ++ // Examples: 0, 1, 15 ++ DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ++) ++ ++// DBRedisDBIndex returns an attribute KeyValue conforming to the ++// "db.redis.database_index" semantic conventions. It represents the index of ++// the database being accessed as used in the [`SELECT` ++// command](https://redis.io/commands/select), provided as an integer. To be ++// used instead of the generic `db.name` attribute. ++func DBRedisDBIndex(val int) attribute.KeyValue { ++ return DBRedisDBIndexKey.Int(val) ++} ++ ++// Call-level attributes for MongoDB ++const ( ++ // DBMongoDBCollectionKey is the attribute Key conforming to the ++ // "db.mongodb.collection" semantic conventions. It represents the ++ // collection being accessed within the database stated in `db.name`. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'customers', 'products' ++ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ++) ++ ++// DBMongoDBCollection returns an attribute KeyValue conforming to the ++// "db.mongodb.collection" semantic conventions. It represents the collection ++// being accessed within the database stated in `db.name`. ++func DBMongoDBCollection(val string) attribute.KeyValue { ++ return DBMongoDBCollectionKey.String(val) ++} ++ ++// Call-level attributes for SQL databases ++const ( ++ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" ++ // semantic conventions. It represents the name of the primary table that ++ // the operation is acting upon, including the database name (if ++ // applicable). 
++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'public.users', 'customers' ++ // Note: It is not recommended to attempt any client-side parsing of ++ // `db.statement` just to get this property, but it should be set if it is ++ // provided by the library being instrumented. If the operation is acting ++ // upon an anonymous table, or more than one table, this value MUST NOT be ++ // set. ++ DBSQLTableKey = attribute.Key("db.sql.table") ++) ++ ++// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" ++// semantic conventions. It represents the name of the primary table that the ++// operation is acting upon, including the database name (if applicable). ++func DBSQLTable(val string) attribute.KeyValue { ++ return DBSQLTableKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's ++// concepts. ++const ( ++ // OtelStatusCodeKey is the attribute Key conforming to the ++ // "otel.status_code" semantic conventions. It represents the name of the ++ // code, either "OK" or "ERROR". MUST NOT be set if the status code is ++ // UNSET. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ OtelStatusCodeKey = attribute.Key("otel.status_code") ++ ++ // OtelStatusDescriptionKey is the attribute Key conforming to the ++ // "otel.status_description" semantic conventions. It represents the ++ // description of the Status if it has a value, otherwise not set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'resource not found' ++ OtelStatusDescriptionKey = attribute.Key("otel.status_description") ++) ++ ++var ( ++ // The operation has been validated by an Application developer or Operator to have completed successfully ++ OtelStatusCodeOk = OtelStatusCodeKey.String("OK") ++ // The operation contains an error ++ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ++) ++ ++// OtelStatusDescription returns an attribute KeyValue conforming to the ++// "otel.status_description" semantic conventions. It represents the ++// description of the Status if it has a value, otherwise not set. ++func OtelStatusDescription(val string) attribute.KeyValue { ++ return OtelStatusDescriptionKey.String(val) ++} ++ ++// This semantic convention describes an instance of a function that runs ++// without provisioning or managing of servers (also known as serverless ++// functions or Function as a Service (FaaS)) with spans. ++const ( ++ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" ++ // semantic conventions. It represents the type of the trigger which caused ++ // this function execution. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: For the server/consumer span on the incoming side, ++ // `faas.trigger` MUST be set. ++ // ++ // Clients invoking FaaS instances usually cannot set `faas.trigger`, ++ // since they would typically need to look in the payload to determine ++ // the event type. If clients set it, it should be the same as the ++ // trigger that corresponding incoming would have (i.e., this has ++ // nothing to do with the underlying transport used to make the API ++ // call to invoke the lambda, which is often HTTP). ++ FaaSTriggerKey = attribute.Key("faas.trigger") ++ ++ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" ++ // semantic conventions. It represents the execution ID of the current ++ // function execution. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' ++ FaaSExecutionKey = attribute.Key("faas.execution") ++) ++ ++var ( ++ // A response to some data source operation such as a database or filesystem read/write ++ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") ++ // To provide an answer to an inbound HTTP request ++ FaaSTriggerHTTP = FaaSTriggerKey.String("http") ++ // A function is set to be executed when messages are sent to a messaging system ++ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") ++ // A function is scheduled to be executed regularly ++ FaaSTriggerTimer = FaaSTriggerKey.String("timer") ++ // If none of the others apply ++ FaaSTriggerOther = FaaSTriggerKey.String("other") ++) ++ ++// FaaSExecution returns an attribute KeyValue conforming to the ++// "faas.execution" semantic conventions. It represents the execution ID of the ++// current function execution. ++func FaaSExecution(val string) attribute.KeyValue { ++ return FaaSExecutionKey.String(val) ++} ++ ++// Semantic Convention for FaaS triggered as a response to some data source ++// operation such as a database or filesystem read/write. ++const ( ++ // FaaSDocumentCollectionKey is the attribute Key conforming to the ++ // "faas.document.collection" semantic conventions. It represents the name ++ // of the source on which the triggering operation was performed. For ++ // example, in Cloud Storage or S3 corresponds to the bucket name, and in ++ // Cosmos DB to the database name. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myBucketName', 'myDBName' ++ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") ++ ++ // FaaSDocumentOperationKey is the attribute Key conforming to the ++ // "faas.document.operation" semantic conventions. It represents the ++ // describes the type of the operation that was performed on the data. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ FaaSDocumentOperationKey = attribute.Key("faas.document.operation") ++ ++ // FaaSDocumentTimeKey is the attribute Key conforming to the ++ // "faas.document.time" semantic conventions. It represents a string ++ // containing the time when the data was accessed in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSDocumentTimeKey = attribute.Key("faas.document.time") ++ ++ // FaaSDocumentNameKey is the attribute Key conforming to the ++ // "faas.document.name" semantic conventions. It represents the document ++ // name/table subjected to the operation. For example, in Cloud Storage or ++ // S3 is the name of the file, and in Cosmos DB the table name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myFile.txt', 'myTableName' ++ FaaSDocumentNameKey = attribute.Key("faas.document.name") ++) ++ ++var ( ++ // When a new object is created ++ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") ++ // When an object is modified ++ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") ++ // When an object is deleted ++ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ++) ++ ++// FaaSDocumentCollection returns an attribute KeyValue conforming to the ++// "faas.document.collection" semantic conventions. It represents the name of ++// the source on which the triggering operation was performed. For example, in ++// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the ++// database name. ++func FaaSDocumentCollection(val string) attribute.KeyValue { ++ return FaaSDocumentCollectionKey.String(val) ++} ++ ++// FaaSDocumentTime returns an attribute KeyValue conforming to the ++// "faas.document.time" semantic conventions. It represents a string containing ++// the time when the data was accessed in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSDocumentTime(val string) attribute.KeyValue { ++ return FaaSDocumentTimeKey.String(val) ++} ++ ++// FaaSDocumentName returns an attribute KeyValue conforming to the ++// "faas.document.name" semantic conventions. It represents the document ++// name/table subjected to the operation. For example, in Cloud Storage or S3 ++// is the name of the file, and in Cosmos DB the table name. ++func FaaSDocumentName(val string) attribute.KeyValue { ++ return FaaSDocumentNameKey.String(val) ++} ++ ++// Semantic Convention for FaaS scheduled to be executed regularly. ++const ( ++ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic ++ // conventions. It represents a string containing the function invocation ++ // time in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSTimeKey = attribute.Key("faas.time") ++ ++ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic ++ // conventions. It represents a string containing the schedule period as ++ // [Cron ++ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0/5 * * * ? *' ++ FaaSCronKey = attribute.Key("faas.cron") ++) ++ ++// FaaSTime returns an attribute KeyValue conforming to the "faas.time" ++// semantic conventions. It represents a string containing the function ++// invocation time in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSTime(val string) attribute.KeyValue { ++ return FaaSTimeKey.String(val) ++} ++ ++// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" ++// semantic conventions. It represents a string containing the schedule period ++// as [Cron ++// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). 
++func FaaSCron(val string) attribute.KeyValue { ++ return FaaSCronKey.String(val) ++} ++ ++// Contains additional attributes for incoming FaaS spans. ++const ( ++ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" ++ // semantic conventions. It represents a boolean that is true if the ++ // serverless function is executed for the first time (aka cold-start). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ FaaSColdstartKey = attribute.Key("faas.coldstart") ++) ++ ++// FaaSColdstart returns an attribute KeyValue conforming to the ++// "faas.coldstart" semantic conventions. It represents a boolean that is true ++// if the serverless function is executed for the first time (aka cold-start). ++func FaaSColdstart(val bool) attribute.KeyValue { ++ return FaaSColdstartKey.Bool(val) ++} ++ ++// Contains additional attributes for outgoing FaaS spans. ++const ( ++ // FaaSInvokedNameKey is the attribute Key conforming to the ++ // "faas.invoked_name" semantic conventions. It represents the name of the ++ // invoked function. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function' ++ // Note: SHOULD be equal to the `faas.name` resource attribute of the ++ // invoked function. ++ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") ++ ++ // FaaSInvokedProviderKey is the attribute Key conforming to the ++ // "faas.invoked_provider" semantic conventions. It represents the cloud ++ // provider of the invoked function. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the ++ // invoked function. ++ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") ++ ++ // FaaSInvokedRegionKey is the attribute Key conforming to the ++ // "faas.invoked_region" semantic conventions. It represents the cloud ++ // region of the invoked function. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (For some cloud providers, like ++ // AWS or GCP, the region in which a function is hosted is essential to ++ // uniquely identify the function and also part of its endpoint. Since it's ++ // part of the endpoint being called, the region is always known to ++ // clients. In these cases, `faas.invoked_region` MUST be set accordingly. ++ // If the region is unknown to the client or not required for identifying ++ // the invoked function, setting `faas.invoked_region` is optional.) ++ // Stability: stable ++ // Examples: 'eu-central-1' ++ // Note: SHOULD be equal to the `cloud.region` resource attribute of the ++ // invoked function. ++ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ++) ++ ++var ( ++ // Alibaba Cloud ++ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") ++ // Microsoft Azure ++ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") ++ // Google Cloud Platform ++ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ++ // Tencent Cloud ++ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ++) ++ ++// FaaSInvokedName returns an attribute KeyValue conforming to the ++// "faas.invoked_name" semantic conventions. It represents the name of the ++// invoked function. 
++func FaaSInvokedName(val string) attribute.KeyValue { ++ return FaaSInvokedNameKey.String(val) ++} ++ ++// FaaSInvokedRegion returns an attribute KeyValue conforming to the ++// "faas.invoked_region" semantic conventions. It represents the cloud region ++// of the invoked function. ++func FaaSInvokedRegion(val string) attribute.KeyValue { ++ return FaaSInvokedRegionKey.String(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetTransportKey is the attribute Key conforming to the "net.transport" ++ // semantic conventions. It represents the transport protocol used. See ++ // note below. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ NetTransportKey = attribute.Key("net.transport") ++ ++ // NetAppProtocolNameKey is the attribute Key conforming to the ++ // "net.app.protocol.name" semantic conventions. It represents the ++ // application layer protocol used. The value SHOULD be normalized to ++ // lowercase. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") ++ ++ // NetAppProtocolVersionKey is the attribute Key conforming to the ++ // "net.app.protocol.version" semantic conventions. It represents the ++ // version of the application layer protocol used. See note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3.1.1' ++ // Note: `net.app.protocol.version` refers to the version of the protocol ++ // used and might be different from the protocol client's version. If the ++ // HTTP client used has a version of `0.27.2`, but sends HTTP version ++ // `1.1`, this attribute should be set to `1.1`. ++ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") ++ ++ // NetSockPeerNameKey is the attribute Key conforming to the ++ // "net.sock.peer.name" semantic conventions. It represents the remote ++ // socket peer name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If available and different from ++ // `net.peer.name` and if `net.sock.peer.addr` is set.) ++ // Stability: stable ++ // Examples: 'proxy.example.com' ++ NetSockPeerNameKey = attribute.Key("net.sock.peer.name") ++ ++ // NetSockPeerAddrKey is the attribute Key conforming to the ++ // "net.sock.peer.addr" semantic conventions. It represents the remote ++ // socket peer address: IPv4 or IPv6 for internet protocols, path for local ++ // communication, ++ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '127.0.0.1', '/tmp/mysql.sock' ++ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") ++ ++ // NetSockPeerPortKey is the attribute Key conforming to the ++ // "net.sock.peer.port" semantic conventions. It represents the remote ++ // socket peer port. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If defined for the address family and if ++ // different than `net.peer.port` and if `net.sock.peer.addr` is set.) ++ // Stability: stable ++ // Examples: 16456 ++ NetSockPeerPortKey = attribute.Key("net.sock.peer.port") ++ ++ // NetSockFamilyKey is the attribute Key conforming to the ++ // "net.sock.family" semantic conventions. It represents the protocol ++ // [address ++ // family](https://man7.org/linux/man-pages/man7/address_families.7.html) ++ // which is used for communication. 
++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (If different than `inet` and if ++ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers ++ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in ++ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support ++ // instrumentations that follow previous versions of this document.) ++ // Stability: stable ++ // Examples: 'inet6', 'bluetooth' ++ NetSockFamilyKey = attribute.Key("net.sock.family") ++ ++ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" ++ // semantic conventions. It represents the logical remote hostname, see ++ // note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'example.com' ++ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an ++ // extra DNS lookup. ++ NetPeerNameKey = attribute.Key("net.peer.name") ++ ++ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" ++ // semantic conventions. It represents the logical remote port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 80, 8080, 443 ++ NetPeerPortKey = attribute.Key("net.peer.port") ++ ++ // NetHostNameKey is the attribute Key conforming to the "net.host.name" ++ // semantic conventions. It represents the logical local hostname or ++ // similar, see note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'localhost' ++ NetHostNameKey = attribute.Key("net.host.name") ++ ++ // NetHostPortKey is the attribute Key conforming to the "net.host.port" ++ // semantic conventions. It represents the logical local port number, ++ // preferably the one that the peer used to connect ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 8080 ++ NetHostPortKey = attribute.Key("net.host.port") ++ ++ // NetSockHostAddrKey is the attribute Key conforming to the ++ // "net.sock.host.addr" semantic conventions. It represents the local ++ // socket address. Useful in case of a multi-IP host. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '192.168.0.1' ++ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") ++ ++ // NetSockHostPortKey is the attribute Key conforming to the ++ // "net.sock.host.port" semantic conventions. It represents the local ++ // socket port number. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If defined for the address family and if ++ // different than `net.host.port` and if `net.sock.host.addr` is set.) ++ // Stability: stable ++ // Examples: 35555 ++ NetSockHostPortKey = attribute.Key("net.sock.host.port") ++ ++ // NetHostConnectionTypeKey is the attribute Key conforming to the ++ // "net.host.connection.type" semantic conventions. It represents the ++ // internet connection type currently being used by the host. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'wifi' ++ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") ++ ++ // NetHostConnectionSubtypeKey is the attribute Key conforming to the ++ // "net.host.connection.subtype" semantic conventions. It represents the ++ // this describes more details regarding the connection.type. It may be the ++ // type of cell technology connection, but it could be used for describing ++ // details about a wifi connection. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'LTE' ++ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") ++ ++ // NetHostCarrierNameKey is the attribute Key conforming to the ++ // "net.host.carrier.name" semantic conventions. It represents the name of ++ // the mobile carrier. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'sprint' ++ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") ++ ++ // NetHostCarrierMccKey is the attribute Key conforming to the ++ // "net.host.carrier.mcc" semantic conventions. It represents the mobile ++ // carrier country code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '310' ++ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") ++ ++ // NetHostCarrierMncKey is the attribute Key conforming to the ++ // "net.host.carrier.mnc" semantic conventions. It represents the mobile ++ // carrier network code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '001' ++ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") ++ ++ // NetHostCarrierIccKey is the attribute Key conforming to the ++ // "net.host.carrier.icc" semantic conventions. It represents the ISO ++ // 3166-1 alpha-2 2-character country code associated with the mobile ++ // carrier network. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'DE' ++ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ++) ++ ++var ( ++ // ip_tcp ++ NetTransportTCP = NetTransportKey.String("ip_tcp") ++ // ip_udp ++ NetTransportUDP = NetTransportKey.String("ip_udp") ++ // Named or anonymous pipe. See note below ++ NetTransportPipe = NetTransportKey.String("pipe") ++ // In-process communication ++ NetTransportInProc = NetTransportKey.String("inproc") ++ // Something else (non IP-based) ++ NetTransportOther = NetTransportKey.String("other") ++) ++ ++var ( ++ // IPv4 address ++ NetSockFamilyInet = NetSockFamilyKey.String("inet") ++ // IPv6 address ++ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") ++ // Unix domain socket path ++ NetSockFamilyUnix = NetSockFamilyKey.String("unix") ++) ++ ++var ( ++ // wifi ++ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") ++ // wired ++ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") ++ // cell ++ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") ++ // unavailable ++ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") ++ // unknown ++ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ++) ++ ++var ( ++ // GPRS ++ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") ++ // EDGE ++ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") ++ // UMTS ++ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") ++ // CDMA ++ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") ++ // EVDO Rel. 0 ++ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") ++ // EVDO Rev. 
A ++ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") ++ // CDMA2000 1XRTT ++ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") ++ // HSDPA ++ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") ++ // HSUPA ++ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") ++ // HSPA ++ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") ++ // IDEN ++ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") ++ // EVDO Rev. B ++ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") ++ // LTE ++ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") ++ // EHRPD ++ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") ++ // HSPAP ++ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") ++ // GSM ++ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") ++ // TD-SCDMA ++ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") ++ // IWLAN ++ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") ++ // 5G NR (New Radio) ++ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") ++ // 5G NRNSA (New Radio Non-Standalone) ++ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") ++ // LTE CA ++ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ++) ++ ++// NetAppProtocolName returns an attribute KeyValue conforming to the ++// "net.app.protocol.name" semantic conventions. It represents the application ++// layer protocol used. The value SHOULD be normalized to lowercase. ++func NetAppProtocolName(val string) attribute.KeyValue { ++ return NetAppProtocolNameKey.String(val) ++} ++ ++// NetAppProtocolVersion returns an attribute KeyValue conforming to the ++// "net.app.protocol.version" semantic conventions. It represents the version ++// of the application layer protocol used. See note below. ++func NetAppProtocolVersion(val string) attribute.KeyValue { ++ return NetAppProtocolVersionKey.String(val) ++} ++ ++// NetSockPeerName returns an attribute KeyValue conforming to the ++// "net.sock.peer.name" semantic conventions. It represents the remote socket ++// peer name. ++func NetSockPeerName(val string) attribute.KeyValue { ++ return NetSockPeerNameKey.String(val) ++} ++ ++// NetSockPeerAddr returns an attribute KeyValue conforming to the ++// "net.sock.peer.addr" semantic conventions. It represents the remote socket ++// peer address: IPv4 or IPv6 for internet protocols, path for local ++// communication, ++// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). ++func NetSockPeerAddr(val string) attribute.KeyValue { ++ return NetSockPeerAddrKey.String(val) ++} ++ ++// NetSockPeerPort returns an attribute KeyValue conforming to the ++// "net.sock.peer.port" semantic conventions. It represents the remote socket ++// peer port. ++func NetSockPeerPort(val int) attribute.KeyValue { ++ return NetSockPeerPortKey.Int(val) ++} ++ ++// NetPeerName returns an attribute KeyValue conforming to the ++// "net.peer.name" semantic conventions. It represents the logical remote ++// hostname, see note below. ++func NetPeerName(val string) attribute.KeyValue { ++ return NetPeerNameKey.String(val) ++} ++ ++// NetPeerPort returns an attribute KeyValue conforming to the ++// "net.peer.port" semantic conventions. 
It represents the logical remote port ++// number ++func NetPeerPort(val int) attribute.KeyValue { ++ return NetPeerPortKey.Int(val) ++} ++ ++// NetHostName returns an attribute KeyValue conforming to the ++// "net.host.name" semantic conventions. It represents the logical local ++// hostname or similar, see note below. ++func NetHostName(val string) attribute.KeyValue { ++ return NetHostNameKey.String(val) ++} ++ ++// NetHostPort returns an attribute KeyValue conforming to the ++// "net.host.port" semantic conventions. It represents the logical local port ++// number, preferably the one that the peer used to connect ++func NetHostPort(val int) attribute.KeyValue { ++ return NetHostPortKey.Int(val) ++} ++ ++// NetSockHostAddr returns an attribute KeyValue conforming to the ++// "net.sock.host.addr" semantic conventions. It represents the local socket ++// address. Useful in case of a multi-IP host. ++func NetSockHostAddr(val string) attribute.KeyValue { ++ return NetSockHostAddrKey.String(val) ++} ++ ++// NetSockHostPort returns an attribute KeyValue conforming to the ++// "net.sock.host.port" semantic conventions. It represents the local socket ++// port number. ++func NetSockHostPort(val int) attribute.KeyValue { ++ return NetSockHostPortKey.Int(val) ++} ++ ++// NetHostCarrierName returns an attribute KeyValue conforming to the ++// "net.host.carrier.name" semantic conventions. It represents the name of the ++// mobile carrier. ++func NetHostCarrierName(val string) attribute.KeyValue { ++ return NetHostCarrierNameKey.String(val) ++} ++ ++// NetHostCarrierMcc returns an attribute KeyValue conforming to the ++// "net.host.carrier.mcc" semantic conventions. It represents the mobile ++// carrier country code. ++func NetHostCarrierMcc(val string) attribute.KeyValue { ++ return NetHostCarrierMccKey.String(val) ++} ++ ++// NetHostCarrierMnc returns an attribute KeyValue conforming to the ++// "net.host.carrier.mnc" semantic conventions. It represents the mobile ++// carrier network code. ++func NetHostCarrierMnc(val string) attribute.KeyValue { ++ return NetHostCarrierMncKey.String(val) ++} ++ ++// NetHostCarrierIcc returns an attribute KeyValue conforming to the ++// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++// alpha-2 2-character country code associated with the mobile carrier network. ++func NetHostCarrierIcc(val string) attribute.KeyValue { ++ return NetHostCarrierIccKey.String(val) ++} ++ ++// Operations that access some remote service. ++const ( ++ // PeerServiceKey is the attribute Key conforming to the "peer.service" ++ // semantic conventions. It represents the ++ // [`service.name`](../../resource/semantic_conventions/README.md#service) ++ // of the remote service. SHOULD be equal to the actual `service.name` ++ // resource attribute of the remote service if any. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'AuthTokenCache' ++ PeerServiceKey = attribute.Key("peer.service") ++) ++ ++// PeerService returns an attribute KeyValue conforming to the ++// "peer.service" semantic conventions. It represents the ++// [`service.name`](../../resource/semantic_conventions/README.md#service) of ++// the remote service. SHOULD be equal to the actual `service.name` resource ++// attribute of the remote service if any. 
++func PeerService(val string) attribute.KeyValue { ++ return PeerServiceKey.String(val) ++} ++ ++// These attributes may be used for any operation with an authenticated and/or ++// authorized enduser. ++const ( ++ // EnduserIDKey is the attribute Key conforming to the "enduser.id" ++ // semantic conventions. It represents the username or client_id extracted ++ // from the access token or ++ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header ++ // in the inbound request from outside the system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'username' ++ EnduserIDKey = attribute.Key("enduser.id") ++ ++ // EnduserRoleKey is the attribute Key conforming to the "enduser.role" ++ // semantic conventions. It represents the actual/assumed role the client ++ // is making the request under extracted from token or application security ++ // context. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'admin' ++ EnduserRoleKey = attribute.Key("enduser.role") ++ ++ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" ++ // semantic conventions. It represents the scopes or granted authorities ++ // the client currently possesses extracted from token or application ++ // security context. The value would come from the scope associated with an ++ // [OAuth 2.0 Access ++ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++ // value in a [SAML 2.0 ++ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'read:message, write:files' ++ EnduserScopeKey = attribute.Key("enduser.scope") ++) ++ ++// EnduserID returns an attribute KeyValue conforming to the "enduser.id" ++// semantic conventions. It represents the username or client_id extracted from ++// the access token or ++// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in ++// the inbound request from outside the system. ++func EnduserID(val string) attribute.KeyValue { ++ return EnduserIDKey.String(val) ++} ++ ++// EnduserRole returns an attribute KeyValue conforming to the ++// "enduser.role" semantic conventions. It represents the actual/assumed role ++// the client is making the request under extracted from token or application ++// security context. ++func EnduserRole(val string) attribute.KeyValue { ++ return EnduserRoleKey.String(val) ++} ++ ++// EnduserScope returns an attribute KeyValue conforming to the ++// "enduser.scope" semantic conventions. It represents the scopes or granted ++// authorities the client currently possesses extracted from token or ++// application security context. The value would come from the scope associated ++// with an [OAuth 2.0 Access ++// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++// value in a [SAML 2.0 ++// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++func EnduserScope(val string) attribute.KeyValue { ++ return EnduserScopeKey.String(val) ++} ++ ++// These attributes may be used for any operation to store information about a ++// thread that started a span. ++const ( ++ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic ++ // conventions. It represents the current "managed" thread ID (as opposed ++ // to OS thread ID). 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ ThreadIDKey = attribute.Key("thread.id") ++ ++ // ThreadNameKey is the attribute Key conforming to the "thread.name" ++ // semantic conventions. It represents the current thread name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'main' ++ ThreadNameKey = attribute.Key("thread.name") ++) ++ ++// ThreadID returns an attribute KeyValue conforming to the "thread.id" ++// semantic conventions. It represents the current "managed" thread ID (as ++// opposed to OS thread ID). ++func ThreadID(val int) attribute.KeyValue { ++ return ThreadIDKey.Int(val) ++} ++ ++// ThreadName returns an attribute KeyValue conforming to the "thread.name" ++// semantic conventions. It represents the current thread name. ++func ThreadName(val string) attribute.KeyValue { ++ return ThreadNameKey.String(val) ++} ++ ++// These attributes allow to report this unit of code and therefore to provide ++// more context about the span. ++const ( ++ // CodeFunctionKey is the attribute Key conforming to the "code.function" ++ // semantic conventions. It represents the method or function name, or ++ // equivalent (usually rightmost part of the code unit's name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'serveRequest' ++ CodeFunctionKey = attribute.Key("code.function") ++ ++ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" ++ // semantic conventions. It represents the "namespace" within which ++ // `code.function` is defined. Usually the qualified class or module name, ++ // such that `code.namespace` + some separator + `code.function` form a ++ // unique identifier for the code unit. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.example.MyHTTPService' ++ CodeNamespaceKey = attribute.Key("code.namespace") ++ ++ // CodeFilepathKey is the attribute Key conforming to the "code.filepath" ++ // semantic conventions. It represents the source code file name that ++ // identifies the code unit as uniquely as possible (preferably an absolute ++ // file path). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/usr/local/MyApplication/content_root/app/index.php' ++ CodeFilepathKey = attribute.Key("code.filepath") ++ ++ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" ++ // semantic conventions. It represents the line number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ CodeLineNumberKey = attribute.Key("code.lineno") ++ ++ // CodeColumnKey is the attribute Key conforming to the "code.column" ++ // semantic conventions. It represents the column number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 16 ++ CodeColumnKey = attribute.Key("code.column") ++) ++ ++// CodeFunction returns an attribute KeyValue conforming to the ++// "code.function" semantic conventions. It represents the method or function ++// name, or equivalent (usually rightmost part of the code unit's name). 
++func CodeFunction(val string) attribute.KeyValue { ++ return CodeFunctionKey.String(val) ++} ++ ++// CodeNamespace returns an attribute KeyValue conforming to the ++// "code.namespace" semantic conventions. It represents the "namespace" within ++// which `code.function` is defined. Usually the qualified class or module ++// name, such that `code.namespace` + some separator + `code.function` form a ++// unique identifier for the code unit. ++func CodeNamespace(val string) attribute.KeyValue { ++ return CodeNamespaceKey.String(val) ++} ++ ++// CodeFilepath returns an attribute KeyValue conforming to the ++// "code.filepath" semantic conventions. It represents the source code file ++// name that identifies the code unit as uniquely as possible (preferably an ++// absolute file path). ++func CodeFilepath(val string) attribute.KeyValue { ++ return CodeFilepathKey.String(val) ++} ++ ++// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" ++// semantic conventions. It represents the line number in `code.filepath` best ++// representing the operation. It SHOULD point within the code unit named in ++// `code.function`. ++func CodeLineNumber(val int) attribute.KeyValue { ++ return CodeLineNumberKey.Int(val) ++} ++ ++// CodeColumn returns an attribute KeyValue conforming to the "code.column" ++// semantic conventions. It represents the column number in `code.filepath` ++// best representing the operation. It SHOULD point within the code unit named ++// in `code.function`. ++func CodeColumn(val int) attribute.KeyValue { ++ return CodeColumnKey.Int(val) ++} ++ ++// Semantic conventions for HTTP client and server Spans. ++const ( ++ // HTTPMethodKey is the attribute Key conforming to the "http.method" ++ // semantic conventions. It represents the hTTP request method. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'GET', 'POST', 'HEAD' ++ HTTPMethodKey = attribute.Key("http.method") ++ ++ // HTTPStatusCodeKey is the attribute Key conforming to the ++ // "http.status_code" semantic conventions. It represents the [HTTP ++ // response status code](https://tools.ietf.org/html/rfc7231#section-6). ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If and only if one was ++ // received/sent.) ++ // Stability: stable ++ // Examples: 200 ++ HTTPStatusCodeKey = attribute.Key("http.status_code") ++ ++ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" ++ // semantic conventions. It represents the kind of HTTP protocol used. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: If `net.transport` is not specified, it can be assumed to be ++ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is ++ // assumed. ++ HTTPFlavorKey = attribute.Key("http.flavor") ++ ++ // HTTPUserAgentKey is the attribute Key conforming to the ++ // "http.user_agent" semantic conventions. It represents the value of the ++ // [HTTP ++ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++ // header sent by the client. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' ++ HTTPUserAgentKey = attribute.Key("http.user_agent") ++ ++ // HTTPRequestContentLengthKey is the attribute Key conforming to the ++ // "http.request_content_length" semantic conventions. It represents the ++ // size of the request payload body in bytes. 
This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") ++ ++ // HTTPResponseContentLengthKey is the attribute Key conforming to the ++ // "http.response_content_length" semantic conventions. It represents the ++ // size of the response payload body in bytes. This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ++) ++ ++var ( ++ // HTTP/1.0 ++ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") ++ // HTTP/1.1 ++ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") ++ // HTTP/2 ++ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") ++ // HTTP/3 ++ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") ++ // SPDY protocol ++ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") ++ // QUIC protocol ++ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ++) ++ ++// HTTPMethod returns an attribute KeyValue conforming to the "http.method" ++// semantic conventions. It represents the hTTP request method. ++func HTTPMethod(val string) attribute.KeyValue { ++ return HTTPMethodKey.String(val) ++} ++ ++// HTTPStatusCode returns an attribute KeyValue conforming to the ++// "http.status_code" semantic conventions. It represents the [HTTP response ++// status code](https://tools.ietf.org/html/rfc7231#section-6). ++func HTTPStatusCode(val int) attribute.KeyValue { ++ return HTTPStatusCodeKey.Int(val) ++} ++ ++// HTTPUserAgent returns an attribute KeyValue conforming to the ++// "http.user_agent" semantic conventions. It represents the value of the [HTTP ++// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++// header sent by the client. ++func HTTPUserAgent(val string) attribute.KeyValue { ++ return HTTPUserAgentKey.String(val) ++} ++ ++// HTTPRequestContentLength returns an attribute KeyValue conforming to the ++// "http.request_content_length" semantic conventions. It represents the size ++// of the request payload body in bytes. This is the number of bytes ++// transferred excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPRequestContentLength(val int) attribute.KeyValue { ++ return HTTPRequestContentLengthKey.Int(val) ++} ++ ++// HTTPResponseContentLength returns an attribute KeyValue conforming to the ++// "http.response_content_length" semantic conventions. It represents the size ++// of the response payload body in bytes. This is the number of bytes ++// transferred excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. 
For requests using transport encoding, this should be the compressed ++// size. ++func HTTPResponseContentLength(val int) attribute.KeyValue { ++ return HTTPResponseContentLengthKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Client ++const ( ++ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic ++ // conventions. It represents the full HTTP request URL in the form ++ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is ++ // not transmitted over HTTP, but if it is known, it should be included ++ // nevertheless. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' ++ // Note: `http.url` MUST NOT contain credentials passed via URL in form of ++ // `https://username:password@www.example.com/`. In such case the ++ // attribute's value should be `https://www.example.com/`. ++ HTTPURLKey = attribute.Key("http.url") ++ ++ // HTTPResendCountKey is the attribute Key conforming to the ++ // "http.resend_count" semantic conventions. It represents the ordinal ++ // number of request resending attempt (for any reason, including ++ // redirects). ++ // ++ // Type: int ++ // RequirementLevel: Recommended (if and only if request was retried.) ++ // Stability: stable ++ // Examples: 3 ++ // Note: The resend count SHOULD be updated each time an HTTP request gets ++ // resent by the client, regardless of what was the cause of the resending ++ // (e.g. redirection, authorization failure, 503 Server Unavailable, ++ // network issues, or any other). ++ HTTPResendCountKey = attribute.Key("http.resend_count") ++) ++ ++// HTTPURL returns an attribute KeyValue conforming to the "http.url" ++// semantic conventions. It represents the full HTTP request URL in the form ++// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not ++// transmitted over HTTP, but if it is known, it should be included ++// nevertheless. ++func HTTPURL(val string) attribute.KeyValue { ++ return HTTPURLKey.String(val) ++} ++ ++// HTTPResendCount returns an attribute KeyValue conforming to the ++// "http.resend_count" semantic conventions. It represents the ordinal number ++// of request resending attempt (for any reason, including redirects). ++func HTTPResendCount(val int) attribute.KeyValue { ++ return HTTPResendCountKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Server ++const ( ++ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" ++ // semantic conventions. It represents the URI scheme identifying the used ++ // protocol. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'http', 'https' ++ HTTPSchemeKey = attribute.Key("http.scheme") ++ ++ // HTTPTargetKey is the attribute Key conforming to the "http.target" ++ // semantic conventions. It represents the full request target as passed in ++ // a HTTP request line or equivalent. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '/path/12314/?q=ddds' ++ HTTPTargetKey = attribute.Key("http.target") ++ ++ // HTTPRouteKey is the attribute Key conforming to the "http.route" ++ // semantic conventions. It represents the matched route (path template in ++ // the format used by the respective server framework). 
See note below ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's available) ++ // Stability: stable ++ // Examples: '/users/:userID?', '{controller}/{action}/{id?}' ++ // Note: 'http.route' MUST NOT be populated when this is not supported by ++ // the HTTP server framework as the route attribute should have ++ // low-cardinality and the URI path can NOT substitute it. ++ HTTPRouteKey = attribute.Key("http.route") ++ ++ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" ++ // semantic conventions. It represents the IP address of the original ++ // client behind all proxies, if known (e.g. from ++ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '83.164.160.102' ++ // Note: This is not necessarily the same as `net.sock.peer.addr`, which ++ // would ++ // identify the network-level peer, which may be a proxy. ++ // ++ // This attribute should be set when a source of information different ++ // from the one used for `net.sock.peer.addr`, is available even if that ++ // other ++ // source just confirms the same value as `net.sock.peer.addr`. ++ // Rationale: For `net.sock.peer.addr`, one typically does not know if it ++ // comes from a proxy, reverse proxy, or the actual client. Setting ++ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that ++ // one is at least somewhat confident that the address is not that of ++ // the closest proxy. ++ HTTPClientIPKey = attribute.Key("http.client_ip") ++) ++ ++// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" ++// semantic conventions. It represents the URI scheme identifying the used ++// protocol. ++func HTTPScheme(val string) attribute.KeyValue { ++ return HTTPSchemeKey.String(val) ++} ++ ++// HTTPTarget returns an attribute KeyValue conforming to the "http.target" ++// semantic conventions. It represents the full request target as passed in a ++// HTTP request line or equivalent. ++func HTTPTarget(val string) attribute.KeyValue { ++ return HTTPTargetKey.String(val) ++} ++ ++// HTTPRoute returns an attribute KeyValue conforming to the "http.route" ++// semantic conventions. It represents the matched route (path template in the ++// format used by the respective server framework). See note below ++func HTTPRoute(val string) attribute.KeyValue { ++ return HTTPRouteKey.String(val) ++} ++ ++// HTTPClientIP returns an attribute KeyValue conforming to the ++// "http.client_ip" semantic conventions. It represents the IP address of the ++// original client behind all proxies, if known (e.g. from ++// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). ++func HTTPClientIP(val string) attribute.KeyValue { ++ return HTTPClientIPKey.String(val) ++} ++ ++// Attributes that exist for multiple DynamoDB request types. ++const ( ++ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_names" semantic conventions. It represents the keys ++ // in the `RequestItems` object field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'Cats' ++ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") ++ ++ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the ++ // "aws.dynamodb.consumed_capacity" semantic conventions. 
It represents the ++ // JSON-serialized value of each item in the `ConsumedCapacity` response ++ // field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { ++ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number }, "TableName": "string", ++ // "WriteCapacityUnits": number }' ++ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") ++ ++ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to ++ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++ // represents the JSON-serialized value of the `ItemCollectionMetrics` ++ // response field. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": ++ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { ++ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], ++ // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, ++ // "SizeEstimateRangeGB": [ number ] } ] }' ++ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") ++ ++ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to ++ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It ++ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` ++ // request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") ++ ++ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming ++ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. ++ // It represents the value of the ++ // `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") ++ ++ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the ++ // "aws.dynamodb.consistent_read" semantic conventions. It represents the ++ // value of the `ConsistentRead` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") ++ ++ // AWSDynamoDBProjectionKey is the attribute Key conforming to the ++ // "aws.dynamodb.projection" semantic conventions. It represents the value ++ // of the `ProjectionExpression` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, ++ // RelatedItems, ProductReviews' ++ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") ++ ++ // AWSDynamoDBLimitKey is the attribute Key conforming to the ++ // "aws.dynamodb.limit" semantic conventions. It represents the value of ++ // the `Limit` request parameter. 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") ++ ++ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the ++ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++ // value of the `AttributesToGet` request parameter. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'lives', 'id' ++ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") ++ ++ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the ++ // "aws.dynamodb.index_name" semantic conventions. It represents the value ++ // of the `IndexName` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'name_to_group' ++ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") ++ ++ // AWSDynamoDBSelectKey is the attribute Key conforming to the ++ // "aws.dynamodb.select" semantic conventions. It represents the value of ++ // the `Select` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ALL_ATTRIBUTES', 'COUNT' ++ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ++) ++ ++// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_names" semantic conventions. It represents the keys in ++// the `RequestItems` object field. ++func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { ++ return AWSDynamoDBTableNamesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to ++// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++// JSON-serialized value of each item in the `ConsumedCapacity` response field. ++func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { ++ return AWSDynamoDBConsumedCapacityKey.StringSlice(val) ++} ++ ++// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming ++// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++// represents the JSON-serialized value of the `ItemCollectionMetrics` response ++// field. ++func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { ++ return AWSDynamoDBItemCollectionMetricsKey.String(val) ++} ++ ++// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.ReadCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the ++// "aws.dynamodb.consistent_read" semantic conventions. It represents the value ++// of the `ConsistentRead` request parameter. 
++func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { ++ return AWSDynamoDBConsistentReadKey.Bool(val) ++} ++ ++// AWSDynamoDBProjection returns an attribute KeyValue conforming to the ++// "aws.dynamodb.projection" semantic conventions. It represents the value of ++// the `ProjectionExpression` request parameter. ++func AWSDynamoDBProjection(val string) attribute.KeyValue { ++ return AWSDynamoDBProjectionKey.String(val) ++} ++ ++// AWSDynamoDBLimit returns an attribute KeyValue conforming to the ++// "aws.dynamodb.limit" semantic conventions. It represents the value of the ++// `Limit` request parameter. ++func AWSDynamoDBLimit(val int) attribute.KeyValue { ++ return AWSDynamoDBLimitKey.Int(val) ++} ++ ++// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to ++// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++// value of the `AttributesToGet` request parameter. ++func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributesToGetKey.StringSlice(val) ++} ++ ++// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the ++// "aws.dynamodb.index_name" semantic conventions. It represents the value of ++// the `IndexName` request parameter. ++func AWSDynamoDBIndexName(val string) attribute.KeyValue { ++ return AWSDynamoDBIndexNameKey.String(val) ++} ++ ++// AWSDynamoDBSelect returns an attribute KeyValue conforming to the ++// "aws.dynamodb.select" semantic conventions. It represents the value of the ++// `Select` request parameter. ++func AWSDynamoDBSelect(val string) attribute.KeyValue { ++ return AWSDynamoDBSelectKey.String(val) ++} ++ ++// DynamoDB.CreateTable ++const ( ++ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `GlobalSecondaryIndexes` request field ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": ++ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ ++ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { ++ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") ++ ++ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `LocalSecondaryIndexes` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexARN": "string", "IndexName": "string", ++ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' ++ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ++) ++ ++// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_indexes" semantic ++// conventions. 
It represents the JSON-serialized value of each item of the ++// `GlobalSecondaryIndexes` request field ++func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming ++// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++// represents the JSON-serialized value of each item of the ++// `LocalSecondaryIndexes` request field. ++func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// DynamoDB.ListTables ++const ( ++ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the ++ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents ++ // the value of the `ExclusiveStartTableName` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'CatsTable' ++ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") ++ ++ // AWSDynamoDBTableCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_count" semantic conventions. It represents the the ++ // number of items in the `TableNames` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 20 ++ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ++) ++ ++// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming ++// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It ++// represents the value of the `ExclusiveStartTableName` request parameter. ++func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { ++ return AWSDynamoDBExclusiveStartTableKey.String(val) ++} ++ ++// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_count" semantic conventions. It represents the the ++// number of items in the `TableNames` response parameter. ++func AWSDynamoDBTableCount(val int) attribute.KeyValue { ++ return AWSDynamoDBTableCountKey.Int(val) ++} ++ ++// DynamoDB.Query ++const ( ++ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the ++ // "aws.dynamodb.scan_forward" semantic conventions. It represents the ++ // value of the `ScanIndexForward` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ++) ++ ++// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of ++// the `ScanIndexForward` request parameter. ++func AWSDynamoDBScanForward(val bool) attribute.KeyValue { ++ return AWSDynamoDBScanForwardKey.Bool(val) ++} ++ ++// DynamoDB.Scan ++const ( ++ // AWSDynamoDBSegmentKey is the attribute Key conforming to the ++ // "aws.dynamodb.segment" semantic conventions. It represents the value of ++ // the `Segment` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") ++ ++ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the ++ // "aws.dynamodb.total_segments" semantic conventions. It represents the ++ // value of the `TotalSegments` request parameter. 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 100 ++ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") ++ ++ // AWSDynamoDBCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.count" semantic conventions. It represents the value of ++ // the `Count` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") ++ ++ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.scanned_count" semantic conventions. It represents the ++ // value of the `ScannedCount` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 50 ++ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ++) ++ ++// AWSDynamoDBSegment returns an attribute KeyValue conforming to the ++// "aws.dynamodb.segment" semantic conventions. It represents the value of the ++// `Segment` request parameter. ++func AWSDynamoDBSegment(val int) attribute.KeyValue { ++ return AWSDynamoDBSegmentKey.Int(val) ++} ++ ++// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the ++// "aws.dynamodb.total_segments" semantic conventions. It represents the value ++// of the `TotalSegments` request parameter. ++func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { ++ return AWSDynamoDBTotalSegmentsKey.Int(val) ++} ++ ++// AWSDynamoDBCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.count" semantic conventions. It represents the value of the ++// `Count` response parameter. ++func AWSDynamoDBCount(val int) attribute.KeyValue { ++ return AWSDynamoDBCountKey.Int(val) ++} ++ ++// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scanned_count" semantic conventions. It represents the value ++// of the `ScannedCount` response parameter. ++func AWSDynamoDBScannedCount(val int) attribute.KeyValue { ++ return AWSDynamoDBScannedCountKey.Int(val) ++} ++ ++// DynamoDB.UpdateTable ++const ( ++ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to ++ // the "aws.dynamodb.attribute_definitions" semantic conventions. It ++ // represents the JSON-serialized value of each item in the ++ // `AttributeDefinitions` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' ++ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") ++ ++ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key ++ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++ // conventions. It represents the JSON-serialized value of each item in the ++ // the `GlobalSecondaryIndexUpdates` request field. 
++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, ++ // "ProvisionedThroughput": { "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ++) ++ ++// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming ++// to the "aws.dynamodb.attribute_definitions" semantic conventions. It ++// represents the JSON-serialized value of each item in the ++// `AttributeDefinitions` request field. ++func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) ++} ++ ++// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++// conventions. It represents the JSON-serialized value of each item in the the ++// `GlobalSecondaryIndexUpdates` request field. ++func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) ++} ++ ++// Semantic conventions to apply when instrumenting the GraphQL implementation. ++// They map GraphQL operations to attributes on a Span. ++const ( ++ // GraphqlOperationNameKey is the attribute Key conforming to the ++ // "graphql.operation.name" semantic conventions. It represents the name of ++ // the operation being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'findBookByID' ++ GraphqlOperationNameKey = attribute.Key("graphql.operation.name") ++ ++ // GraphqlOperationTypeKey is the attribute Key conforming to the ++ // "graphql.operation.type" semantic conventions. It represents the type of ++ // the operation being executed. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query', 'mutation', 'subscription' ++ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") ++ ++ // GraphqlDocumentKey is the attribute Key conforming to the ++ // "graphql.document" semantic conventions. It represents the GraphQL ++ // document being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query findBookByID { bookByID(id: ?) { name } }' ++ // Note: The value may be sanitized to exclude sensitive information. ++ GraphqlDocumentKey = attribute.Key("graphql.document") ++) ++ ++var ( ++ // GraphQL query ++ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") ++ // GraphQL mutation ++ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") ++ // GraphQL subscription ++ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ++) ++ ++// GraphqlOperationName returns an attribute KeyValue conforming to the ++// "graphql.operation.name" semantic conventions. It represents the name of the ++// operation being executed. ++func GraphqlOperationName(val string) attribute.KeyValue { ++ return GraphqlOperationNameKey.String(val) ++} ++ ++// GraphqlDocument returns an attribute KeyValue conforming to the ++// "graphql.document" semantic conventions. It represents the GraphQL document ++// being executed. 
++func GraphqlDocument(val string) attribute.KeyValue { ++ return GraphqlDocumentKey.String(val) ++} ++ ++// Semantic convention describing per-message attributes populated on messaging ++// spans or links. ++const ( ++ // MessagingMessageIDKey is the attribute Key conforming to the ++ // "messaging.message.id" semantic conventions. It represents a value used ++ // by the messaging system as an identifier for the message, represented as ++ // a string. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '452a7c7c7c7048c2f887f61572b18fc2' ++ MessagingMessageIDKey = attribute.Key("messaging.message.id") ++ ++ // MessagingMessageConversationIDKey is the attribute Key conforming to the ++ // "messaging.message.conversation_id" semantic conventions. It represents ++ // the [conversation ID](#conversations) identifying the conversation to ++ // which the message belongs, represented as a string. Sometimes called ++ // "Correlation ID". ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyConversationID' ++ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") ++ ++ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to ++ // the "messaging.message.payload_size_bytes" semantic conventions. It ++ // represents the (uncompressed) size of the message payload in bytes. Also ++ // use this attribute if it is unknown whether the compressed or ++ // uncompressed payload size is reported. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2738 ++ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") ++ ++ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key ++ // conforming to the "messaging.message.payload_compressed_size_bytes" ++ // semantic conventions. It represents the compressed size of the message ++ // payload in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2048 ++ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ++) ++ ++// MessagingMessageID returns an attribute KeyValue conforming to the ++// "messaging.message.id" semantic conventions. It represents a value used by ++// the messaging system as an identifier for the message, represented as a ++// string. ++func MessagingMessageID(val string) attribute.KeyValue { ++ return MessagingMessageIDKey.String(val) ++} ++ ++// MessagingMessageConversationID returns an attribute KeyValue conforming ++// to the "messaging.message.conversation_id" semantic conventions. It ++// represents the [conversation ID](#conversations) identifying the ++// conversation to which the message belongs, represented as a string. ++// Sometimes called "Correlation ID". ++func MessagingMessageConversationID(val string) attribute.KeyValue { ++ return MessagingMessageConversationIDKey.String(val) ++} ++ ++// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming ++// to the "messaging.message.payload_size_bytes" semantic conventions. It ++// represents the (uncompressed) size of the message payload in bytes. Also use ++// this attribute if it is unknown whether the compressed or uncompressed ++// payload size is reported. 
++func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadSizeBytesKey.Int(val) ++} ++ ++// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue ++// conforming to the "messaging.message.payload_compressed_size_bytes" semantic ++// conventions. It represents the compressed size of the message payload in ++// bytes. ++func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) ++} ++ ++// Semantic convention for attributes that describe messaging destination on ++// broker ++const ( ++ // MessagingDestinationNameKey is the attribute Key conforming to the ++ // "messaging.destination.name" semantic conventions. It represents the ++ // message destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Destination name SHOULD uniquely identify a specific queue, topic ++ // or other entity within the broker. If ++ // the broker does not have such notion, the destination name SHOULD ++ // uniquely identify the broker. ++ MessagingDestinationNameKey = attribute.Key("messaging.destination.name") ++ ++ // MessagingDestinationKindKey is the attribute Key conforming to the ++ // "messaging.destination.kind" semantic conventions. It represents the ++ // kind of message destination ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") ++ ++ // MessagingDestinationTemplateKey is the attribute Key conforming to the ++ // "messaging.destination.template" semantic conventions. It represents the ++ // low cardinality representation of the messaging destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Destination names could be constructed from templates. An example ++ // would be a destination name involving a user name or product id. ++ // Although the destination name in this case is of high cardinality, the ++ // underlying template is of low cardinality and can be effectively used ++ // for grouping and aggregation. ++ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") ++ ++ // MessagingDestinationTemporaryKey is the attribute Key conforming to the ++ // "messaging.destination.temporary" semantic conventions. It represents a ++ // boolean that is true if the message destination is temporary and might ++ // not exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") ++ ++ // MessagingDestinationAnonymousKey is the attribute Key conforming to the ++ // "messaging.destination.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message destination is anonymous (could be ++ // unnamed or have auto-generated name). 
++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ++) ++ ++var ( ++ // A message sent to a queue ++ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") ++ // A message sent to a topic ++ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ++) ++ ++// MessagingDestinationName returns an attribute KeyValue conforming to the ++// "messaging.destination.name" semantic conventions. It represents the message ++// destination name ++func MessagingDestinationName(val string) attribute.KeyValue { ++ return MessagingDestinationNameKey.String(val) ++} ++ ++// MessagingDestinationTemplate returns an attribute KeyValue conforming to ++// the "messaging.destination.template" semantic conventions. It represents the ++// low cardinality representation of the messaging destination name ++func MessagingDestinationTemplate(val string) attribute.KeyValue { ++ return MessagingDestinationTemplateKey.String(val) ++} ++ ++// MessagingDestinationTemporary returns an attribute KeyValue conforming to ++// the "messaging.destination.temporary" semantic conventions. It represents a ++// boolean that is true if the message destination is temporary and might not ++// exist anymore after messages are processed. ++func MessagingDestinationTemporary(val bool) attribute.KeyValue { ++ return MessagingDestinationTemporaryKey.Bool(val) ++} ++ ++// MessagingDestinationAnonymous returns an attribute KeyValue conforming to ++// the "messaging.destination.anonymous" semantic conventions. It represents a ++// boolean that is true if the message destination is anonymous (could be ++// unnamed or have auto-generated name). ++func MessagingDestinationAnonymous(val bool) attribute.KeyValue { ++ return MessagingDestinationAnonymousKey.Bool(val) ++} ++ ++// Semantic convention for attributes that describe messaging source on broker ++const ( ++ // MessagingSourceNameKey is the attribute Key conforming to the ++ // "messaging.source.name" semantic conventions. It represents the message ++ // source name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Source name SHOULD uniquely identify a specific queue, topic, or ++ // other entity within the broker. If ++ // the broker does not have such notion, the source name SHOULD uniquely ++ // identify the broker. ++ MessagingSourceNameKey = attribute.Key("messaging.source.name") ++ ++ // MessagingSourceKindKey is the attribute Key conforming to the ++ // "messaging.source.kind" semantic conventions. It represents the kind of ++ // message source ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceKindKey = attribute.Key("messaging.source.kind") ++ ++ // MessagingSourceTemplateKey is the attribute Key conforming to the ++ // "messaging.source.template" semantic conventions. It represents the low ++ // cardinality representation of the messaging source name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Source names could be constructed from templates. An example would ++ // be a source name involving a user name or product id. Although the ++ // source name in this case is of high cardinality, the underlying template ++ // is of low cardinality and can be effectively used for grouping and ++ // aggregation. 
++ MessagingSourceTemplateKey = attribute.Key("messaging.source.template") ++ ++ // MessagingSourceTemporaryKey is the attribute Key conforming to the ++ // "messaging.source.temporary" semantic conventions. It represents a ++ // boolean that is true if the message source is temporary and might not ++ // exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") ++ ++ // MessagingSourceAnonymousKey is the attribute Key conforming to the ++ // "messaging.source.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message source is anonymous (could be ++ // unnamed or have auto-generated name). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ++) ++ ++var ( ++ // A message received from a queue ++ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") ++ // A message received from a topic ++ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ++) ++ ++// MessagingSourceName returns an attribute KeyValue conforming to the ++// "messaging.source.name" semantic conventions. It represents the message ++// source name ++func MessagingSourceName(val string) attribute.KeyValue { ++ return MessagingSourceNameKey.String(val) ++} ++ ++// MessagingSourceTemplate returns an attribute KeyValue conforming to the ++// "messaging.source.template" semantic conventions. It represents the low ++// cardinality representation of the messaging source name ++func MessagingSourceTemplate(val string) attribute.KeyValue { ++ return MessagingSourceTemplateKey.String(val) ++} ++ ++// MessagingSourceTemporary returns an attribute KeyValue conforming to the ++// "messaging.source.temporary" semantic conventions. It represents a boolean ++// that is true if the message source is temporary and might not exist anymore ++// after messages are processed. ++func MessagingSourceTemporary(val bool) attribute.KeyValue { ++ return MessagingSourceTemporaryKey.Bool(val) ++} ++ ++// MessagingSourceAnonymous returns an attribute KeyValue conforming to the ++// "messaging.source.anonymous" semantic conventions. It represents a boolean ++// that is true if the message source is anonymous (could be unnamed or have ++// auto-generated name). ++func MessagingSourceAnonymous(val bool) attribute.KeyValue { ++ return MessagingSourceAnonymousKey.Bool(val) ++} ++ ++// General attributes used in messaging systems. ++const ( ++ // MessagingSystemKey is the attribute Key conforming to the ++ // "messaging.system" semantic conventions. It represents a string ++ // identifying the messaging system. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' ++ MessagingSystemKey = attribute.Key("messaging.system") ++ ++ // MessagingOperationKey is the attribute Key conforming to the ++ // "messaging.operation" semantic conventions. It represents a string ++ // identifying the kind of messaging operation as defined in the [Operation ++ // names](#operation-names) section above. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: If a custom value is used, it MUST be of low cardinality. 
++ MessagingOperationKey = attribute.Key("messaging.operation") ++ ++ // MessagingBatchMessageCountKey is the attribute Key conforming to the ++ // "messaging.batch.message_count" semantic conventions. It represents the ++ // number of messages sent, received, or processed in the scope of the ++ // batching operation. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the span describes an ++ // operation on a batch of messages.) ++ // Stability: stable ++ // Examples: 0, 1, 2 ++ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on ++ // spans that operate with a single message. When a messaging client ++ // library supports both batch and single-message API for the same ++ // operation, instrumentations SHOULD use `messaging.batch.message_count` ++ // for batching APIs and SHOULD NOT use it for single-message APIs. ++ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ++) ++ ++var ( ++ // publish ++ MessagingOperationPublish = MessagingOperationKey.String("publish") ++ // receive ++ MessagingOperationReceive = MessagingOperationKey.String("receive") ++ // process ++ MessagingOperationProcess = MessagingOperationKey.String("process") ++) ++ ++// MessagingSystem returns an attribute KeyValue conforming to the ++// "messaging.system" semantic conventions. It represents a string identifying ++// the messaging system. ++func MessagingSystem(val string) attribute.KeyValue { ++ return MessagingSystemKey.String(val) ++} ++ ++// MessagingBatchMessageCount returns an attribute KeyValue conforming to ++// the "messaging.batch.message_count" semantic conventions. It represents the ++// number of messages sent, received, or processed in the scope of the batching ++// operation. ++func MessagingBatchMessageCount(val int) attribute.KeyValue { ++ return MessagingBatchMessageCountKey.Int(val) ++} ++ ++// Semantic convention for a consumer of messages received from a messaging ++// system ++const ( ++ // MessagingConsumerIDKey is the attribute Key conforming to the ++ // "messaging.consumer.id" semantic conventions. It represents the ++ // identifier for the consumer receiving a message. For Kafka, set it to ++ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if ++ // both are present, or only `messaging.kafka.consumer.group`. For brokers, ++ // such as RabbitMQ and Artemis, set it to the `client_id` of the client ++ // consuming the message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mygroup - client-6' ++ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ++) ++ ++// MessagingConsumerID returns an attribute KeyValue conforming to the ++// "messaging.consumer.id" semantic conventions. It represents the identifier ++// for the consumer receiving a message. For Kafka, set it to ++// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both ++// are present, or only `messaging.kafka.consumer.group`. For brokers, such as ++// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the ++// message. ++func MessagingConsumerID(val string) attribute.KeyValue { ++ return MessagingConsumerIDKey.String(val) ++} ++ ++// Attributes for RabbitMQ ++const ( ++ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key ++ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++ // conventions. It represents the rabbitMQ message routing key. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If not empty.) ++ // Stability: stable ++ // Examples: 'myKey' ++ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ++) ++ ++// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue ++// conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++// conventions. It represents the rabbitMQ message routing key. ++func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { ++ return MessagingRabbitmqDestinationRoutingKeyKey.String(val) ++} ++ ++// Attributes for Apache Kafka ++const ( ++ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the ++ // "messaging.kafka.message.key" semantic conventions. It represents the ++ // message keys in Kafka are used for grouping alike messages to ensure ++ // they're processed on the same partition. They differ from ++ // `messaging.message.id` in that they're not unique. If the key is `null`, ++ // the attribute MUST NOT be set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myKey' ++ // Note: If the key type is not string, it's string representation has to ++ // be supplied for the attribute. If the key has no unambiguous, canonical ++ // string form, don't include its value. ++ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") ++ ++ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the ++ // "messaging.kafka.consumer.group" semantic conventions. It represents the ++ // name of the Kafka Consumer Group that is handling the message. Only ++ // applies to consumers, not producers. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-group' ++ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") ++ ++ // MessagingKafkaClientIDKey is the attribute Key conforming to the ++ // "messaging.kafka.client_id" semantic conventions. It represents the ++ // client ID for the Consumer or Producer that is handling the message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'client-5' ++ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") ++ ++ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to ++ // the "messaging.kafka.destination.partition" semantic conventions. It ++ // represents the partition the message is sent to. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") ++ ++ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the ++ // "messaging.kafka.source.partition" semantic conventions. It represents ++ // the partition the message is received from. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") ++ ++ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the ++ // "messaging.kafka.message.offset" semantic conventions. It represents the ++ // offset of a record in the corresponding Kafka partition. 
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") ++ ++ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the ++ // "messaging.kafka.message.tombstone" semantic conventions. It represents ++ // a boolean that is true if the message is a tombstone. ++ // ++ // Type: boolean ++ // RequirementLevel: ConditionallyRequired (If value is `true`. When ++ // missing, the value is assumed to be `false`.) ++ // Stability: stable ++ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ++) ++ ++// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the ++// "messaging.kafka.message.key" semantic conventions. It represents the ++// message keys in Kafka are used for grouping alike messages to ensure they're ++// processed on the same partition. They differ from `messaging.message.id` in ++// that they're not unique. If the key is `null`, the attribute MUST NOT be ++// set. ++func MessagingKafkaMessageKey(val string) attribute.KeyValue { ++ return MessagingKafkaMessageKeyKey.String(val) ++} ++ ++// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to ++// the "messaging.kafka.consumer.group" semantic conventions. It represents the ++// name of the Kafka Consumer Group that is handling the message. Only applies ++// to consumers, not producers. ++func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { ++ return MessagingKafkaConsumerGroupKey.String(val) ++} ++ ++// MessagingKafkaClientID returns an attribute KeyValue conforming to the ++// "messaging.kafka.client_id" semantic conventions. It represents the client ++// ID for the Consumer or Producer that is handling the message. ++func MessagingKafkaClientID(val string) attribute.KeyValue { ++ return MessagingKafkaClientIDKey.String(val) ++} ++ ++// MessagingKafkaDestinationPartition returns an attribute KeyValue ++// conforming to the "messaging.kafka.destination.partition" semantic ++// conventions. It represents the partition the message is sent to. ++func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { ++ return MessagingKafkaDestinationPartitionKey.Int(val) ++} ++ ++// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to ++// the "messaging.kafka.source.partition" semantic conventions. It represents ++// the partition the message is received from. ++func MessagingKafkaSourcePartition(val int) attribute.KeyValue { ++ return MessagingKafkaSourcePartitionKey.Int(val) ++} ++ ++// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to ++// the "messaging.kafka.message.offset" semantic conventions. It represents the ++// offset of a record in the corresponding Kafka partition. ++func MessagingKafkaMessageOffset(val int) attribute.KeyValue { ++ return MessagingKafkaMessageOffsetKey.Int(val) ++} ++ ++// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming ++// to the "messaging.kafka.message.tombstone" semantic conventions. It ++// represents a boolean that is true if the message is a tombstone. ++func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { ++ return MessagingKafkaMessageTombstoneKey.Bool(val) ++} ++ ++// Attributes for Apache RocketMQ ++const ( ++ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the ++ // "messaging.rocketmq.namespace" semantic conventions. 
It represents the ++ // namespace of RocketMQ resources, resources in different namespaces are ++ // individual. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myNamespace' ++ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") ++ ++ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_group" semantic conventions. It represents ++ // the name of the RocketMQ producer/consumer group that is handling the ++ // message. The client type is identified by the SpanKind. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myConsumerGroup' ++ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") ++ ++ // MessagingRocketmqClientIDKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_id" semantic conventions. It represents the ++ // unique identifier for each client. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myhost@8742@s8083jm' ++ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") ++ ++ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delivery_timestamp" ++ // semantic conventions. It represents the timestamp in milliseconds that ++ // the delay message is expected to be delivered to consumer. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delay time level is not specified.) ++ // Stability: stable ++ // Examples: 1665987217045 ++ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") ++ ++ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++ // conventions. It represents the delay time level for delay message, which ++ // determines the message delay time. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delivery timestamp is not specified.) ++ // Stability: stable ++ // Examples: 3 ++ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") ++ ++ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.group" semantic conventions. It represents ++ // the it is essential for FIFO message. Messages that belong to the same ++ // message group are always processed one by one within the same consumer ++ // group. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) ++ // Stability: stable ++ // Examples: 'myMessageGroup' ++ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") ++ ++ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.type" semantic conventions. It represents ++ // the type of message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") ++ ++ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.tag" semantic conventions. It represents the ++ // secondary classifier of message besides topic. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tagA' ++ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") ++ ++ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.keys" semantic conventions. It represents ++ // the key(s) of message, another way to mark message besides message id. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'keyA', 'keyB' ++ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") ++ ++ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to ++ // the "messaging.rocketmq.consumption_model" semantic conventions. It ++ // represents the model of message consumption. This only applies to ++ // consumer spans. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ++) ++ ++var ( ++ // Normal message ++ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") ++ // FIFO message ++ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") ++ // Delay message ++ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") ++ // Transaction message ++ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ++) ++ ++var ( ++ // Clustering consumption model ++ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") ++ // Broadcasting consumption model ++ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ++) ++ ++// MessagingRocketmqNamespace returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.namespace" semantic conventions. It represents the ++// namespace of RocketMQ resources, resources in different namespaces are ++// individual. ++func MessagingRocketmqNamespace(val string) attribute.KeyValue { ++ return MessagingRocketmqNamespaceKey.String(val) ++} ++ ++// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.client_group" semantic conventions. It represents ++// the name of the RocketMQ producer/consumer group that is handling the ++// message. The client type is identified by the SpanKind. ++func MessagingRocketmqClientGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqClientGroupKey.String(val) ++} ++ ++// MessagingRocketmqClientID returns an attribute KeyValue conforming to the ++// "messaging.rocketmq.client_id" semantic conventions. It represents the ++// unique identifier for each client. ++func MessagingRocketmqClientID(val string) attribute.KeyValue { ++ return MessagingRocketmqClientIDKey.String(val) ++} ++ ++// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic ++// conventions. It represents the timestamp in milliseconds that the delay ++// message is expected to be delivered to consumer. ++func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) ++} ++ ++// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++// conventions. 
It represents the delay time level for delay message, which ++// determines the message delay time. ++func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) ++} ++ ++// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.group" semantic conventions. It represents ++// the it is essential for FIFO message. Messages that belong to the same ++// message group are always processed one by one within the same consumer ++// group. ++func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.tag" semantic conventions. It represents the ++// secondary classifier of message besides topic. ++func MessagingRocketmqMessageTag(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageTagKey.String(val) ++} ++ ++// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.keys" semantic conventions. It represents ++// the key(s) of message, another way to mark message besides message id. ++func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { ++ return MessagingRocketmqMessageKeysKey.StringSlice(val) ++} ++ ++// Semantic conventions for remote procedure calls. ++const ( ++ // RPCSystemKey is the attribute Key conforming to the "rpc.system" ++ // semantic conventions. It represents a string identifying the remoting ++ // system. See below for a list of well-known identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCSystemKey = attribute.Key("rpc.system") ++ ++ // RPCServiceKey is the attribute Key conforming to the "rpc.service" ++ // semantic conventions. It represents the full (logical) name of the ++ // service being called, including its package name, if applicable. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'myservice.EchoService' ++ // Note: This is the logical name of the service from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // class. The `code.namespace` attribute may be used to store the latter ++ // (despite the attribute name, it may include a class name; e.g., class ++ // with method actually executing the call on the server side, RPC client ++ // stub class on the client side). ++ RPCServiceKey = attribute.Key("rpc.service") ++ ++ // RPCMethodKey is the attribute Key conforming to the "rpc.method" ++ // semantic conventions. It represents the name of the (logical) method ++ // being called, must be equal to the $method part in the span name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'exampleMethod' ++ // Note: This is the logical name of the method from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // method/function. The `code.function` attribute may be used to store the ++ // latter (e.g., method actually executing the call on the server side, RPC ++ // client stub method on the client side). 
++ RPCMethodKey = attribute.Key("rpc.method") ++) ++ ++var ( ++ // gRPC ++ RPCSystemGRPC = RPCSystemKey.String("grpc") ++ // Java RMI ++ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") ++ // .NET WCF ++ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") ++ // Apache Dubbo ++ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ++) ++ ++// RPCService returns an attribute KeyValue conforming to the "rpc.service" ++// semantic conventions. It represents the full (logical) name of the service ++// being called, including its package name, if applicable. ++func RPCService(val string) attribute.KeyValue { ++ return RPCServiceKey.String(val) ++} ++ ++// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" ++// semantic conventions. It represents the name of the (logical) method being ++// called, must be equal to the $method part in the span name. ++func RPCMethod(val string) attribute.KeyValue { ++ return RPCMethodKey.String(val) ++} ++ ++// Tech-specific attributes for gRPC. ++const ( ++ // RPCGRPCStatusCodeKey is the attribute Key conforming to the ++ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric ++ // status ++ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of ++ // the gRPC request. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++var ( ++ // OK ++ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) ++ // CANCELLED ++ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) ++ // UNKNOWN ++ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) ++ // INVALID_ARGUMENT ++ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) ++ // DEADLINE_EXCEEDED ++ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) ++ // NOT_FOUND ++ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) ++ // ALREADY_EXISTS ++ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) ++ // PERMISSION_DENIED ++ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) ++ // RESOURCE_EXHAUSTED ++ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) ++ // FAILED_PRECONDITION ++ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) ++ // ABORTED ++ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) ++ // OUT_OF_RANGE ++ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) ++ // UNIMPLEMENTED ++ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) ++ // INTERNAL ++ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) ++ // UNAVAILABLE ++ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) ++ // DATA_LOSS ++ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) ++ // UNAUTHENTICATED ++ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ++) ++ ++// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). ++const ( ++ // RPCJsonrpcVersionKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++ // does not specify this, the value can be omitted. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // version (`1.0`)) ++ // Stability: stable ++ // Examples: '2.0', '1.0' ++ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") ++ ++ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++ // property of request or response. Since protocol allows id to be int, ++ // string, `null` or missing (for notifications), value is expected to be ++ // cast to string for simplicity. Use empty string in case of `null` value. ++ // Omit entirely if this is a notification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10', 'request-7', '' ++ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") ++ ++ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_code" semantic conventions. It represents the ++ // `error.code` property of response if it is an error response. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If response is not successful.) ++ // Stability: stable ++ // Examples: -32700, 100 ++ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") ++ ++ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_message" semantic conventions. It represents the ++ // `error.message` property of response if it is an error response. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Parse error', 'User already exists' ++ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ++) ++ ++// RPCJsonrpcVersion returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++// does not specify this, the value can be omitted. ++func RPCJsonrpcVersion(val string) attribute.KeyValue { ++ return RPCJsonrpcVersionKey.String(val) ++} ++ ++// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++// property of request or response. Since protocol allows id to be int, string, ++// `null` or missing (for notifications), value is expected to be cast to ++// string for simplicity. Use empty string in case of `null` value. Omit ++// entirely if this is a notification. ++func RPCJsonrpcRequestID(val string) attribute.KeyValue { ++ return RPCJsonrpcRequestIDKey.String(val) ++} ++ ++// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_code" semantic conventions. It represents the ++// `error.code` property of response if it is an error response. ++func RPCJsonrpcErrorCode(val int) attribute.KeyValue { ++ return RPCJsonrpcErrorCodeKey.Int(val) ++} ++ ++// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_message" semantic conventions. It represents the ++// `error.message` property of response if it is an error response. 
++func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { ++ return RPCJsonrpcErrorMessageKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go +new file mode 100644 +index 00000000000..e6cf8951053 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go +@@ -0,0 +1,1877 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// These attributes may be used to describe the client in a connection-based ++// network interaction where there is one side that initiates the connection ++// (the client is the side that initiates the connection). This covers all TCP ++// network interactions since TCP is connection-based and one side initiates ++// the connection (an exception is made for peer-to-peer communication over TCP ++// where the "user-facing" surface of the protocol / API does not expose a ++// clear notion of client and server). This also covers UDP network ++// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) ++// and DNS. ++const ( ++ // ClientAddressKey is the attribute Key conforming to the "client.address" ++ // semantic conventions. It represents the client address - unix domain ++ // socket name, IPv4 or IPv6 address. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/tmp/my.sock', '10.1.2.80' ++ // Note: When observed from the server side, and when communicating through ++ // an intermediary, `client.address` SHOULD represent client address behind ++ // any intermediaries (e.g. proxies) if it's available. ++ ClientAddressKey = attribute.Key("client.address") ++ ++ // ClientPortKey is the attribute Key conforming to the "client.port" ++ // semantic conventions. It represents the client port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 65123 ++ // Note: When observed from the server side, and when communicating through ++ // an intermediary, `client.port` SHOULD represent client port behind any ++ // intermediaries (e.g. proxies) if it's available. ++ ClientPortKey = attribute.Key("client.port") ++ ++ // ClientSocketAddressKey is the attribute Key conforming to the ++ // "client.socket.address" semantic conventions. It represents the ++ // immediate client peer address - unix domain socket name, IPv4 or IPv6 ++ // address. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `client.address`.) 
++ // Stability: stable ++ // Examples: '/tmp/my.sock', '127.0.0.1' ++ ClientSocketAddressKey = attribute.Key("client.socket.address") ++ ++ // ClientSocketPortKey is the attribute Key conforming to the ++ // "client.socket.port" semantic conventions. It represents the immediate ++ // client peer port number ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If different than `client.port`.) ++ // Stability: stable ++ // Examples: 35555 ++ ClientSocketPortKey = attribute.Key("client.socket.port") ++) ++ ++// ClientAddress returns an attribute KeyValue conforming to the ++// "client.address" semantic conventions. It represents the client address - ++// unix domain socket name, IPv4 or IPv6 address. ++func ClientAddress(val string) attribute.KeyValue { ++ return ClientAddressKey.String(val) ++} ++ ++// ClientPort returns an attribute KeyValue conforming to the "client.port" ++// semantic conventions. It represents the client port number ++func ClientPort(val int) attribute.KeyValue { ++ return ClientPortKey.Int(val) ++} ++ ++// ClientSocketAddress returns an attribute KeyValue conforming to the ++// "client.socket.address" semantic conventions. It represents the immediate ++// client peer address - unix domain socket name, IPv4 or IPv6 address. ++func ClientSocketAddress(val string) attribute.KeyValue { ++ return ClientSocketAddressKey.String(val) ++} ++ ++// ClientSocketPort returns an attribute KeyValue conforming to the ++// "client.socket.port" semantic conventions. It represents the immediate ++// client peer port number ++func ClientSocketPort(val int) attribute.KeyValue { ++ return ClientSocketPortKey.Int(val) ++} ++ ++// Describes deprecated HTTP attributes. ++const ( ++ // HTTPMethodKey is the attribute Key conforming to the "http.method" ++ // semantic conventions. It represents the deprecated, use ++ // `http.request.method` instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'GET', 'POST', 'HEAD' ++ HTTPMethodKey = attribute.Key("http.method") ++ ++ // HTTPStatusCodeKey is the attribute Key conforming to the ++ // "http.status_code" semantic conventions. It represents the deprecated, ++ // use `http.response.status_code` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 200 ++ HTTPStatusCodeKey = attribute.Key("http.status_code") ++ ++ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" ++ // semantic conventions. It represents the deprecated, use `url.scheme` ++ // instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'http', 'https' ++ HTTPSchemeKey = attribute.Key("http.scheme") ++ ++ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic ++ // conventions. It represents the deprecated, use `url.full` instead. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' ++ HTTPURLKey = attribute.Key("http.url") ++ ++ // HTTPTargetKey is the attribute Key conforming to the "http.target" ++ // semantic conventions. It represents the deprecated, use `url.path` and ++ // `url.query` instead. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/search?q=OpenTelemetry#SemConv' ++ HTTPTargetKey = attribute.Key("http.target") ++ ++ // HTTPRequestContentLengthKey is the attribute Key conforming to the ++ // "http.request_content_length" semantic conventions. It represents the ++ // deprecated, use `http.request.body.size` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 3495 ++ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") ++ ++ // HTTPResponseContentLengthKey is the attribute Key conforming to the ++ // "http.response_content_length" semantic conventions. It represents the ++ // deprecated, use `http.response.body.size` instead. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 3495 ++ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ++) ++ ++// HTTPMethod returns an attribute KeyValue conforming to the "http.method" ++// semantic conventions. It represents the deprecated, use ++// `http.request.method` instead. ++func HTTPMethod(val string) attribute.KeyValue { ++ return HTTPMethodKey.String(val) ++} ++ ++// HTTPStatusCode returns an attribute KeyValue conforming to the ++// "http.status_code" semantic conventions. It represents the deprecated, use ++// `http.response.status_code` instead. ++func HTTPStatusCode(val int) attribute.KeyValue { ++ return HTTPStatusCodeKey.Int(val) ++} ++ ++// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" ++// semantic conventions. It represents the deprecated, use `url.scheme` ++// instead. ++func HTTPScheme(val string) attribute.KeyValue { ++ return HTTPSchemeKey.String(val) ++} ++ ++// HTTPURL returns an attribute KeyValue conforming to the "http.url" ++// semantic conventions. It represents the deprecated, use `url.full` instead. ++func HTTPURL(val string) attribute.KeyValue { ++ return HTTPURLKey.String(val) ++} ++ ++// HTTPTarget returns an attribute KeyValue conforming to the "http.target" ++// semantic conventions. It represents the deprecated, use `url.path` and ++// `url.query` instead. ++func HTTPTarget(val string) attribute.KeyValue { ++ return HTTPTargetKey.String(val) ++} ++ ++// HTTPRequestContentLength returns an attribute KeyValue conforming to the ++// "http.request_content_length" semantic conventions. It represents the ++// deprecated, use `http.request.body.size` instead. ++func HTTPRequestContentLength(val int) attribute.KeyValue { ++ return HTTPRequestContentLengthKey.Int(val) ++} ++ ++// HTTPResponseContentLength returns an attribute KeyValue conforming to the ++// "http.response_content_length" semantic conventions. It represents the ++// deprecated, use `http.response.body.size` instead. ++func HTTPResponseContentLength(val int) attribute.KeyValue { ++ return HTTPResponseContentLengthKey.Int(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetSockPeerNameKey is the attribute Key conforming to the ++ // "net.sock.peer.name" semantic conventions. It represents the deprecated, ++ // use `server.socket.domain` on client spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/var/my.sock' ++ NetSockPeerNameKey = attribute.Key("net.sock.peer.name") ++ ++ // NetSockPeerAddrKey is the attribute Key conforming to the ++ // "net.sock.peer.addr" semantic conventions. 
It represents the deprecated, ++ // use `server.socket.address` on client spans and `client.socket.address` ++ // on server spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '192.168.0.1' ++ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") ++ ++ // NetSockPeerPortKey is the attribute Key conforming to the ++ // "net.sock.peer.port" semantic conventions. It represents the deprecated, ++ // use `server.socket.port` on client spans and `client.socket.port` on ++ // server spans. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 65531 ++ NetSockPeerPortKey = attribute.Key("net.sock.peer.port") ++ ++ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" ++ // semantic conventions. It represents the deprecated, use `server.address` ++ // on client spans and `client.address` on server spans. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'example.com' ++ NetPeerNameKey = attribute.Key("net.peer.name") ++ ++ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" ++ // semantic conventions. It represents the deprecated, use `server.port` on ++ // client spans and `client.port` on server spans. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetPeerPortKey = attribute.Key("net.peer.port") ++ ++ // NetHostNameKey is the attribute Key conforming to the "net.host.name" ++ // semantic conventions. It represents the deprecated, use ++ // `server.address`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'example.com' ++ NetHostNameKey = attribute.Key("net.host.name") ++ ++ // NetHostPortKey is the attribute Key conforming to the "net.host.port" ++ // semantic conventions. It represents the deprecated, use `server.port`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetHostPortKey = attribute.Key("net.host.port") ++ ++ // NetSockHostAddrKey is the attribute Key conforming to the ++ // "net.sock.host.addr" semantic conventions. It represents the deprecated, ++ // use `server.socket.address`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '/var/my.sock' ++ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") ++ ++ // NetSockHostPortKey is the attribute Key conforming to the ++ // "net.sock.host.port" semantic conventions. It represents the deprecated, ++ // use `server.socket.port`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 8080 ++ NetSockHostPortKey = attribute.Key("net.sock.host.port") ++ ++ // NetTransportKey is the attribute Key conforming to the "net.transport" ++ // semantic conventions. It represents the deprecated, use ++ // `network.transport`. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ NetTransportKey = attribute.Key("net.transport") ++ ++ // NetProtocolNameKey is the attribute Key conforming to the ++ // "net.protocol.name" semantic conventions. It represents the deprecated, ++ // use `network.protocol.name`. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetProtocolNameKey = attribute.Key("net.protocol.name") ++ ++ // NetProtocolVersionKey is the attribute Key conforming to the ++ // "net.protocol.version" semantic conventions. It represents the ++ // deprecated, use `network.protocol.version`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '3.1.1' ++ NetProtocolVersionKey = attribute.Key("net.protocol.version") ++ ++ // NetSockFamilyKey is the attribute Key conforming to the ++ // "net.sock.family" semantic conventions. It represents the deprecated, ++ // use `network.transport` and `network.type`. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ NetSockFamilyKey = attribute.Key("net.sock.family") ++) ++ ++var ( ++ // ip_tcp ++ NetTransportTCP = NetTransportKey.String("ip_tcp") ++ // ip_udp ++ NetTransportUDP = NetTransportKey.String("ip_udp") ++ // Named or anonymous pipe ++ NetTransportPipe = NetTransportKey.String("pipe") ++ // In-process communication ++ NetTransportInProc = NetTransportKey.String("inproc") ++ // Something else (non IP-based) ++ NetTransportOther = NetTransportKey.String("other") ++) ++ ++var ( ++ // IPv4 address ++ NetSockFamilyInet = NetSockFamilyKey.String("inet") ++ // IPv6 address ++ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") ++ // Unix domain socket path ++ NetSockFamilyUnix = NetSockFamilyKey.String("unix") ++) ++ ++// NetSockPeerName returns an attribute KeyValue conforming to the ++// "net.sock.peer.name" semantic conventions. It represents the deprecated, use ++// `server.socket.domain` on client spans. ++func NetSockPeerName(val string) attribute.KeyValue { ++ return NetSockPeerNameKey.String(val) ++} ++ ++// NetSockPeerAddr returns an attribute KeyValue conforming to the ++// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use ++// `server.socket.address` on client spans and `client.socket.address` on ++// server spans. ++func NetSockPeerAddr(val string) attribute.KeyValue { ++ return NetSockPeerAddrKey.String(val) ++} ++ ++// NetSockPeerPort returns an attribute KeyValue conforming to the ++// "net.sock.peer.port" semantic conventions. It represents the deprecated, use ++// `server.socket.port` on client spans and `client.socket.port` on server ++// spans. ++func NetSockPeerPort(val int) attribute.KeyValue { ++ return NetSockPeerPortKey.Int(val) ++} ++ ++// NetPeerName returns an attribute KeyValue conforming to the ++// "net.peer.name" semantic conventions. It represents the deprecated, use ++// `server.address` on client spans and `client.address` on server spans. ++func NetPeerName(val string) attribute.KeyValue { ++ return NetPeerNameKey.String(val) ++} ++ ++// NetPeerPort returns an attribute KeyValue conforming to the ++// "net.peer.port" semantic conventions. It represents the deprecated, use ++// `server.port` on client spans and `client.port` on server spans. ++func NetPeerPort(val int) attribute.KeyValue { ++ return NetPeerPortKey.Int(val) ++} ++ ++// NetHostName returns an attribute KeyValue conforming to the ++// "net.host.name" semantic conventions. It represents the deprecated, use ++// `server.address`. ++func NetHostName(val string) attribute.KeyValue { ++ return NetHostNameKey.String(val) ++} ++ ++// NetHostPort returns an attribute KeyValue conforming to the ++// "net.host.port" semantic conventions. 
It represents the deprecated, use ++// `server.port`. ++func NetHostPort(val int) attribute.KeyValue { ++ return NetHostPortKey.Int(val) ++} ++ ++// NetSockHostAddr returns an attribute KeyValue conforming to the ++// "net.sock.host.addr" semantic conventions. It represents the deprecated, use ++// `server.socket.address`. ++func NetSockHostAddr(val string) attribute.KeyValue { ++ return NetSockHostAddrKey.String(val) ++} ++ ++// NetSockHostPort returns an attribute KeyValue conforming to the ++// "net.sock.host.port" semantic conventions. It represents the deprecated, use ++// `server.socket.port`. ++func NetSockHostPort(val int) attribute.KeyValue { ++ return NetSockHostPortKey.Int(val) ++} ++ ++// NetProtocolName returns an attribute KeyValue conforming to the ++// "net.protocol.name" semantic conventions. It represents the deprecated, use ++// `network.protocol.name`. ++func NetProtocolName(val string) attribute.KeyValue { ++ return NetProtocolNameKey.String(val) ++} ++ ++// NetProtocolVersion returns an attribute KeyValue conforming to the ++// "net.protocol.version" semantic conventions. It represents the deprecated, ++// use `network.protocol.version`. ++func NetProtocolVersion(val string) attribute.KeyValue { ++ return NetProtocolVersionKey.String(val) ++} ++ ++// These attributes may be used to describe the receiver of a network ++// exchange/packet. These should be used when there is no client/server ++// relationship between the two sides, or when that relationship is unknown. ++// This covers low-level network interactions (e.g. packet tracing) where you ++// don't know if there was a connection or which side initiated it. This also ++// covers unidirectional UDP flows and peer-to-peer communication where the ++// "user-facing" surface of the protocol / API does not expose a clear notion ++// of client and server. ++const ( ++ // DestinationDomainKey is the attribute Key conforming to the ++ // "destination.domain" semantic conventions. It represents the domain name ++ // of the destination system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'foo.example.com' ++ // Note: This value may be a host name, a fully qualified domain name, or ++ // another host naming format. ++ DestinationDomainKey = attribute.Key("destination.domain") ++ ++ // DestinationAddressKey is the attribute Key conforming to the ++ // "destination.address" semantic conventions. It represents the peer ++ // address, for example IP address or UNIX socket name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ DestinationAddressKey = attribute.Key("destination.address") ++ ++ // DestinationPortKey is the attribute Key conforming to the ++ // "destination.port" semantic conventions. It represents the peer port ++ // number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3389, 2888 ++ DestinationPortKey = attribute.Key("destination.port") ++) ++ ++// DestinationDomain returns an attribute KeyValue conforming to the ++// "destination.domain" semantic conventions. It represents the domain name of ++// the destination system. ++func DestinationDomain(val string) attribute.KeyValue { ++ return DestinationDomainKey.String(val) ++} ++ ++// DestinationAddress returns an attribute KeyValue conforming to the ++// "destination.address" semantic conventions. It represents the peer address, ++// for example IP address or UNIX socket name. 
++func DestinationAddress(val string) attribute.KeyValue { ++ return DestinationAddressKey.String(val) ++} ++ ++// DestinationPort returns an attribute KeyValue conforming to the ++// "destination.port" semantic conventions. It represents the peer port number ++func DestinationPort(val int) attribute.KeyValue { ++ return DestinationPortKey.Int(val) ++} ++ ++// Describes HTTP attributes. ++const ( ++ // HTTPRequestMethodKey is the attribute Key conforming to the ++ // "http.request.method" semantic conventions. It represents the hTTP ++ // request method. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'GET', 'POST', 'HEAD' ++ // Note: HTTP request method value SHOULD be "known" to the ++ // instrumentation. ++ // By default, this convention defines "known" methods as the ones listed ++ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) ++ // and the PATCH method defined in ++ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). ++ // ++ // If the HTTP request method is not known to instrumentation, it MUST set ++ // the `http.request.method` attribute to `_OTHER` and, except if reporting ++ // a metric, MUST ++ // set the exact method received in the request line as value of the ++ // `http.request.method_original` attribute. ++ // ++ // If the HTTP instrumentation could end up converting valid HTTP request ++ // methods to `_OTHER`, then it MUST provide a way to override ++ // the list of known HTTP methods. If this override is done via environment ++ // variable, then the environment variable MUST be named ++ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated ++ // list of case-sensitive known HTTP methods ++ // (this list MUST be a full override of the default known method, it is ++ // not a list of known methods in addition to the defaults). ++ // ++ // HTTP method names are case-sensitive and `http.request.method` attribute ++ // value MUST match a known HTTP method name exactly. ++ // Instrumentations for specific web frameworks that consider HTTP methods ++ // to be case insensitive, SHOULD populate a canonical equivalent. ++ // Tracing instrumentations that do so, MUST also set ++ // `http.request.method_original` to the original value. ++ HTTPRequestMethodKey = attribute.Key("http.request.method") ++ ++ // HTTPResponseStatusCodeKey is the attribute Key conforming to the ++ // "http.response.status_code" semantic conventions. It represents the ++ // [HTTP response status ++ // code](https://tools.ietf.org/html/rfc7231#section-6). ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If and only if one was ++ // received/sent.) 
++ // Stability: stable ++ // Examples: 200 ++ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") ++) ++ ++var ( ++ // CONNECT method ++ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") ++ // DELETE method ++ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") ++ // GET method ++ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") ++ // HEAD method ++ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") ++ // OPTIONS method ++ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") ++ // PATCH method ++ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") ++ // POST method ++ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") ++ // PUT method ++ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") ++ // TRACE method ++ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") ++ // Any HTTP method that the instrumentation has no prior knowledge of ++ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") ++) ++ ++// HTTPResponseStatusCode returns an attribute KeyValue conforming to the ++// "http.response.status_code" semantic conventions. It represents the [HTTP ++// response status code](https://tools.ietf.org/html/rfc7231#section-6). ++func HTTPResponseStatusCode(val int) attribute.KeyValue { ++ return HTTPResponseStatusCodeKey.Int(val) ++} ++ ++// HTTP Server attributes ++const ( ++ // HTTPRouteKey is the attribute Key conforming to the "http.route" ++ // semantic conventions. It represents the matched route (path template in ++ // the format used by the respective server framework). See note below ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's available) ++ // Stability: stable ++ // Examples: '/users/:userID?', '{controller}/{action}/{id?}' ++ // Note: MUST NOT be populated when this is not supported by the HTTP ++ // server framework as the route attribute should have low-cardinality and ++ // the URI path can NOT substitute it. ++ // SHOULD include the [application ++ // root](/docs/http/http-spans.md#http-server-definitions) if there is one. ++ HTTPRouteKey = attribute.Key("http.route") ++) ++ ++// HTTPRoute returns an attribute KeyValue conforming to the "http.route" ++// semantic conventions. It represents the matched route (path template in the ++// format used by the respective server framework). See note below ++func HTTPRoute(val string) attribute.KeyValue { ++ return HTTPRouteKey.String(val) ++} ++ ++// Attributes for Events represented using Log Records. ++const ( ++ // EventNameKey is the attribute Key conforming to the "event.name" ++ // semantic conventions. It represents the name identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'click', 'exception' ++ EventNameKey = attribute.Key("event.name") ++ ++ // EventDomainKey is the attribute Key conforming to the "event.domain" ++ // semantic conventions. It represents the domain identifies the business ++ // context for the events. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: Events across different domains may have same `event.name`, yet be ++ // unrelated events. 
++ EventDomainKey = attribute.Key("event.domain") ++) ++ ++var ( ++ // Events from browser apps ++ EventDomainBrowser = EventDomainKey.String("browser") ++ // Events from mobile apps ++ EventDomainDevice = EventDomainKey.String("device") ++ // Events from Kubernetes ++ EventDomainK8S = EventDomainKey.String("k8s") ++) ++ ++// EventName returns an attribute KeyValue conforming to the "event.name" ++// semantic conventions. It represents the name identifies the event. ++func EventName(val string) attribute.KeyValue { ++ return EventNameKey.String(val) ++} ++ ++// The attributes described in this section are rather generic. They may be ++// used in any Log Record they apply to. ++const ( ++ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" ++ // semantic conventions. It represents a unique identifier for the Log ++ // Record. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' ++ // Note: If an id is provided, other log records with the same id will be ++ // considered duplicates and can be removed safely. This means, that two ++ // distinguishable log records MUST have different values. ++ // The id MAY be an [Universally Unique Lexicographically Sortable ++ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers ++ // (e.g. UUID) may be used as needed. ++ LogRecordUIDKey = attribute.Key("log.record.uid") ++) ++ ++// LogRecordUID returns an attribute KeyValue conforming to the ++// "log.record.uid" semantic conventions. It represents a unique identifier for ++// the Log Record. ++func LogRecordUID(val string) attribute.KeyValue { ++ return LogRecordUIDKey.String(val) ++} ++ ++// Describes Log attributes ++const ( ++ // LogIostreamKey is the attribute Key conforming to the "log.iostream" ++ // semantic conventions. It represents the stream associated with the log. ++ // See below for a list of well-known values. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ LogIostreamKey = attribute.Key("log.iostream") ++) ++ ++var ( ++ // Logs from stdout stream ++ LogIostreamStdout = LogIostreamKey.String("stdout") ++ // Events from stderr stream ++ LogIostreamStderr = LogIostreamKey.String("stderr") ++) ++ ++// A file to which log was emitted. ++const ( ++ // LogFileNameKey is the attribute Key conforming to the "log.file.name" ++ // semantic conventions. It represents the basename of the file. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'audit.log' ++ LogFileNameKey = attribute.Key("log.file.name") ++ ++ // LogFilePathKey is the attribute Key conforming to the "log.file.path" ++ // semantic conventions. It represents the full path to the file. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/var/log/mysql/audit.log' ++ LogFilePathKey = attribute.Key("log.file.path") ++ ++ // LogFileNameResolvedKey is the attribute Key conforming to the ++ // "log.file.name_resolved" semantic conventions. It represents the ++ // basename of the file, with symlinks resolved. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'uuid.log' ++ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") ++ ++ // LogFilePathResolvedKey is the attribute Key conforming to the ++ // "log.file.path_resolved" semantic conventions. It represents the full ++ // path to the file, with symlinks resolved. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/var/lib/docker/uuid.log' ++ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") ++) ++ ++// LogFileName returns an attribute KeyValue conforming to the ++// "log.file.name" semantic conventions. It represents the basename of the ++// file. ++func LogFileName(val string) attribute.KeyValue { ++ return LogFileNameKey.String(val) ++} ++ ++// LogFilePath returns an attribute KeyValue conforming to the ++// "log.file.path" semantic conventions. It represents the full path to the ++// file. ++func LogFilePath(val string) attribute.KeyValue { ++ return LogFilePathKey.String(val) ++} ++ ++// LogFileNameResolved returns an attribute KeyValue conforming to the ++// "log.file.name_resolved" semantic conventions. It represents the basename of ++// the file, with symlinks resolved. ++func LogFileNameResolved(val string) attribute.KeyValue { ++ return LogFileNameResolvedKey.String(val) ++} ++ ++// LogFilePathResolved returns an attribute KeyValue conforming to the ++// "log.file.path_resolved" semantic conventions. It represents the full path ++// to the file, with symlinks resolved. ++func LogFilePathResolved(val string) attribute.KeyValue { ++ return LogFilePathResolvedKey.String(val) ++} ++ ++// Describes JVM memory metric attributes. ++const ( ++ // TypeKey is the attribute Key conforming to the "type" semantic ++ // conventions. It represents the type of memory. ++ // ++ // Type: Enum ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'heap', 'non_heap' ++ TypeKey = attribute.Key("type") ++ ++ // PoolKey is the attribute Key conforming to the "pool" semantic ++ // conventions. It represents the name of the memory pool. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' ++ // Note: Pool names are generally obtained via ++ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). ++ PoolKey = attribute.Key("pool") ++) ++ ++var ( ++ // Heap memory ++ TypeHeap = TypeKey.String("heap") ++ // Non-heap memory ++ TypeNonHeap = TypeKey.String("non_heap") ++) ++ ++// Pool returns an attribute KeyValue conforming to the "pool" semantic ++// conventions. It represents the name of the memory pool. ++func Pool(val string) attribute.KeyValue { ++ return PoolKey.String(val) ++} ++ ++// These attributes may be used to describe the server in a connection-based ++// network interaction where there is one side that initiates the connection ++// (the client is the side that initiates the connection). This covers all TCP ++// network interactions since TCP is connection-based and one side initiates ++// the connection (an exception is made for peer-to-peer communication over TCP ++// where the "user-facing" surface of the protocol / API does not expose a ++// clear notion of client and server). This also covers UDP network ++// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) ++// and DNS. ++const ( ++ // ServerAddressKey is the attribute Key conforming to the "server.address" ++ // semantic conventions. It represents the logical server hostname, matches ++ // server FQDN if available, and IP or socket address if FQDN is not known. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'example.com' ++ ServerAddressKey = attribute.Key("server.address") ++ ++ // ServerPortKey is the attribute Key conforming to the "server.port" ++ // semantic conventions. It represents the logical server port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 80, 8080, 443 ++ ServerPortKey = attribute.Key("server.port") ++ ++ // ServerSocketDomainKey is the attribute Key conforming to the ++ // "server.socket.domain" semantic conventions. It represents the domain ++ // name of an immediate peer. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `server.address`.) ++ // Stability: stable ++ // Examples: 'proxy.example.com' ++ // Note: Typically observed from the client side, and represents a proxy or ++ // other intermediary domain name. ++ ServerSocketDomainKey = attribute.Key("server.socket.domain") ++ ++ // ServerSocketAddressKey is the attribute Key conforming to the ++ // "server.socket.address" semantic conventions. It represents the physical ++ // server IP address or Unix socket address. If set from the client, should ++ // simply use the socket's peer address, and not attempt to find any actual ++ // server IP (i.e., if set from client, this may represent some proxy ++ // server instead of the logical server). ++ // ++ // Type: string ++ // RequirementLevel: Recommended (If different than `server.address`.) ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ ServerSocketAddressKey = attribute.Key("server.socket.address") ++ ++ // ServerSocketPortKey is the attribute Key conforming to the ++ // "server.socket.port" semantic conventions. It represents the physical ++ // server port. ++ // ++ // Type: int ++ // RequirementLevel: Recommended (If different than `server.port`.) ++ // Stability: stable ++ // Examples: 16456 ++ ServerSocketPortKey = attribute.Key("server.socket.port") ++) ++ ++// ServerAddress returns an attribute KeyValue conforming to the ++// "server.address" semantic conventions. It represents the logical server ++// hostname, matches server FQDN if available, and IP or socket address if FQDN ++// is not known. ++func ServerAddress(val string) attribute.KeyValue { ++ return ServerAddressKey.String(val) ++} ++ ++// ServerPort returns an attribute KeyValue conforming to the "server.port" ++// semantic conventions. It represents the logical server port number ++func ServerPort(val int) attribute.KeyValue { ++ return ServerPortKey.Int(val) ++} ++ ++// ServerSocketDomain returns an attribute KeyValue conforming to the ++// "server.socket.domain" semantic conventions. It represents the domain name ++// of an immediate peer. ++func ServerSocketDomain(val string) attribute.KeyValue { ++ return ServerSocketDomainKey.String(val) ++} ++ ++// ServerSocketAddress returns an attribute KeyValue conforming to the ++// "server.socket.address" semantic conventions. It represents the physical ++// server IP address or Unix socket address. If set from the client, should ++// simply use the socket's peer address, and not attempt to find any actual ++// server IP (i.e., if set from client, this may represent some proxy server ++// instead of the logical server). ++func ServerSocketAddress(val string) attribute.KeyValue { ++ return ServerSocketAddressKey.String(val) ++} ++ ++// ServerSocketPort returns an attribute KeyValue conforming to the ++// "server.socket.port" semantic conventions. 
It represents the physical server ++// port. ++func ServerSocketPort(val int) attribute.KeyValue { ++ return ServerSocketPortKey.Int(val) ++} ++ ++// These attributes may be used to describe the sender of a network ++// exchange/packet. These should be used when there is no client/server ++// relationship between the two sides, or when that relationship is unknown. ++// This covers low-level network interactions (e.g. packet tracing) where you ++// don't know if there was a connection or which side initiated it. This also ++// covers unidirectional UDP flows and peer-to-peer communication where the ++// "user-facing" surface of the protocol / API does not expose a clear notion ++// of client and server. ++const ( ++ // SourceDomainKey is the attribute Key conforming to the "source.domain" ++ // semantic conventions. It represents the domain name of the source ++ // system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'foo.example.com' ++ // Note: This value may be a host name, a fully qualified domain name, or ++ // another host naming format. ++ SourceDomainKey = attribute.Key("source.domain") ++ ++ // SourceAddressKey is the attribute Key conforming to the "source.address" ++ // semantic conventions. It represents the source address, for example IP ++ // address or Unix socket name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10.5.3.2' ++ SourceAddressKey = attribute.Key("source.address") ++ ++ // SourcePortKey is the attribute Key conforming to the "source.port" ++ // semantic conventions. It represents the source port number ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3389, 2888 ++ SourcePortKey = attribute.Key("source.port") ++) ++ ++// SourceDomain returns an attribute KeyValue conforming to the ++// "source.domain" semantic conventions. It represents the domain name of the ++// source system. ++func SourceDomain(val string) attribute.KeyValue { ++ return SourceDomainKey.String(val) ++} ++ ++// SourceAddress returns an attribute KeyValue conforming to the ++// "source.address" semantic conventions. It represents the source address, for ++// example IP address or Unix socket name. ++func SourceAddress(val string) attribute.KeyValue { ++ return SourceAddressKey.String(val) ++} ++ ++// SourcePort returns an attribute KeyValue conforming to the "source.port" ++// semantic conventions. It represents the source port number ++func SourcePort(val int) attribute.KeyValue { ++ return SourcePortKey.Int(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetworkTransportKey is the attribute Key conforming to the ++ // "network.transport" semantic conventions. It represents the [OSI ++ // Transport Layer](https://osi-model.com/transport-layer/) or ++ // [Inter-process Communication ++ // method](https://en.wikipedia.org/wiki/Inter-process_communication). The ++ // value SHOULD be normalized to lowercase. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tcp', 'udp' ++ NetworkTransportKey = attribute.Key("network.transport") ++ ++ // NetworkTypeKey is the attribute Key conforming to the "network.type" ++ // semantic conventions. It represents the [OSI Network ++ // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The ++ // value SHOULD be normalized to lowercase. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ipv4', 'ipv6' ++ NetworkTypeKey = attribute.Key("network.type") ++ ++ // NetworkProtocolNameKey is the attribute Key conforming to the ++ // "network.protocol.name" semantic conventions. It represents the [OSI ++ // Application Layer](https://osi-model.com/application-layer/) or non-OSI ++ // equivalent. The value SHOULD be normalized to lowercase. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'amqp', 'http', 'mqtt' ++ NetworkProtocolNameKey = attribute.Key("network.protocol.name") ++ ++ // NetworkProtocolVersionKey is the attribute Key conforming to the ++ // "network.protocol.version" semantic conventions. It represents the ++ // version of the application layer protocol used. See note below. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3.1.1' ++ // Note: `network.protocol.version` refers to the version of the protocol ++ // used and might be different from the protocol client's version. If the ++ // HTTP client used has a version of `0.27.2`, but sends HTTP version ++ // `1.1`, this attribute should be set to `1.1`. ++ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") ++) ++ ++var ( ++ // TCP ++ NetworkTransportTCP = NetworkTransportKey.String("tcp") ++ // UDP ++ NetworkTransportUDP = NetworkTransportKey.String("udp") ++ // Named or anonymous pipe. See note below ++ NetworkTransportPipe = NetworkTransportKey.String("pipe") ++ // Unix domain socket ++ NetworkTransportUnix = NetworkTransportKey.String("unix") ++) ++ ++var ( ++ // IPv4 ++ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") ++ // IPv6 ++ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") ++) ++ ++// NetworkProtocolName returns an attribute KeyValue conforming to the ++// "network.protocol.name" semantic conventions. It represents the [OSI ++// Application Layer](https://osi-model.com/application-layer/) or non-OSI ++// equivalent. The value SHOULD be normalized to lowercase. ++func NetworkProtocolName(val string) attribute.KeyValue { ++ return NetworkProtocolNameKey.String(val) ++} ++ ++// NetworkProtocolVersion returns an attribute KeyValue conforming to the ++// "network.protocol.version" semantic conventions. It represents the version ++// of the application layer protocol used. See note below. ++func NetworkProtocolVersion(val string) attribute.KeyValue { ++ return NetworkProtocolVersionKey.String(val) ++} ++ ++// These attributes may be used for any network related operation. ++const ( ++ // NetworkConnectionTypeKey is the attribute Key conforming to the ++ // "network.connection.type" semantic conventions. It represents the ++ // internet connection type. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'wifi' ++ NetworkConnectionTypeKey = attribute.Key("network.connection.type") ++ ++ // NetworkConnectionSubtypeKey is the attribute Key conforming to the ++ // "network.connection.subtype" semantic conventions. It represents the ++ // this describes more details regarding the connection.type. It may be the ++ // type of cell technology connection, but it could be used for describing ++ // details about a wifi connection. 
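// --- Illustrative usage sketch; NOT part of the vendored patch content above or below. ---
// It shows how the server.* and network.* helpers generated above can be attached to a
// span. The tracer name, span name, host, and port values are placeholders invented for
// this sketch, not values taken from the patch.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func main() {
	// otel.Tracer returns a no-op tracer unless a real SDK is registered,
	// so this sketch runs safely without any exporter configured.
	tracer := otel.Tracer("example/dialer")
	_, span := tracer.Start(context.Background(), "connect")
	defer span.End()

	// Each helper returns an attribute.KeyValue carrying the canonical
	// semantic-convention key ("server.address", "network.transport", ...).
	span.SetAttributes(
		semconv.ServerAddress("example.com"),
		semconv.ServerPort(443),
		semconv.NetworkTransportTCP, // enum member, already an attribute.KeyValue
		semconv.NetworkTypeIpv4,
	)
}
// --- End of illustrative sketch. ---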
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'LTE' ++ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") ++ ++ // NetworkCarrierNameKey is the attribute Key conforming to the ++ // "network.carrier.name" semantic conventions. It represents the name of ++ // the mobile carrier. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'sprint' ++ NetworkCarrierNameKey = attribute.Key("network.carrier.name") ++ ++ // NetworkCarrierMccKey is the attribute Key conforming to the ++ // "network.carrier.mcc" semantic conventions. It represents the mobile ++ // carrier country code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '310' ++ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") ++ ++ // NetworkCarrierMncKey is the attribute Key conforming to the ++ // "network.carrier.mnc" semantic conventions. It represents the mobile ++ // carrier network code. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '001' ++ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") ++ ++ // NetworkCarrierIccKey is the attribute Key conforming to the ++ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++ // alpha-2 2-character country code associated with the mobile carrier ++ // network. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'DE' ++ NetworkCarrierIccKey = attribute.Key("network.carrier.icc") ++) ++ ++var ( ++ // wifi ++ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") ++ // wired ++ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") ++ // cell ++ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") ++ // unavailable ++ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") ++ // unknown ++ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") ++) ++ ++var ( ++ // GPRS ++ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") ++ // EDGE ++ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") ++ // UMTS ++ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") ++ // CDMA ++ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") ++ // EVDO Rel. 0 ++ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") ++ // EVDO Rev. A ++ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") ++ // CDMA2000 1XRTT ++ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") ++ // HSDPA ++ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") ++ // HSUPA ++ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") ++ // HSPA ++ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") ++ // IDEN ++ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") ++ // EVDO Rev. 
B ++ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") ++ // LTE ++ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") ++ // EHRPD ++ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") ++ // HSPAP ++ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") ++ // GSM ++ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") ++ // TD-SCDMA ++ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") ++ // IWLAN ++ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") ++ // 5G NR (New Radio) ++ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") ++ // 5G NRNSA (New Radio Non-Standalone) ++ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") ++ // LTE CA ++ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") ++) ++ ++// NetworkCarrierName returns an attribute KeyValue conforming to the ++// "network.carrier.name" semantic conventions. It represents the name of the ++// mobile carrier. ++func NetworkCarrierName(val string) attribute.KeyValue { ++ return NetworkCarrierNameKey.String(val) ++} ++ ++// NetworkCarrierMcc returns an attribute KeyValue conforming to the ++// "network.carrier.mcc" semantic conventions. It represents the mobile carrier ++// country code. ++func NetworkCarrierMcc(val string) attribute.KeyValue { ++ return NetworkCarrierMccKey.String(val) ++} ++ ++// NetworkCarrierMnc returns an attribute KeyValue conforming to the ++// "network.carrier.mnc" semantic conventions. It represents the mobile carrier ++// network code. ++func NetworkCarrierMnc(val string) attribute.KeyValue { ++ return NetworkCarrierMncKey.String(val) ++} ++ ++// NetworkCarrierIcc returns an attribute KeyValue conforming to the ++// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 ++// alpha-2 2-character country code associated with the mobile carrier network. ++func NetworkCarrierIcc(val string) attribute.KeyValue { ++ return NetworkCarrierIccKey.String(val) ++} ++ ++// Semantic conventions for HTTP client and server Spans. ++const ( ++ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the ++ // "http.request.method_original" semantic conventions. It represents the ++ // original HTTP method sent by the client in the request line. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If and only if it's different ++ // than `http.request.method`.) ++ // Stability: stable ++ // Examples: 'GeT', 'ACL', 'foo' ++ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") ++ ++ // HTTPRequestBodySizeKey is the attribute Key conforming to the ++ // "http.request.body.size" semantic conventions. It represents the size of ++ // the request payload body in bytes. This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") ++ ++ // HTTPResponseBodySizeKey is the attribute Key conforming to the ++ // "http.response.body.size" semantic conventions. It represents the size ++ // of the response payload body in bytes. 
This is the number of bytes ++ // transferred excluding headers and is often, but not always, present as ++ // the ++ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++ // header. For requests using transport encoding, this should be the ++ // compressed size. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3495 ++ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") ++) ++ ++// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the ++// "http.request.method_original" semantic conventions. It represents the ++// original HTTP method sent by the client in the request line. ++func HTTPRequestMethodOriginal(val string) attribute.KeyValue { ++ return HTTPRequestMethodOriginalKey.String(val) ++} ++ ++// HTTPRequestBodySize returns an attribute KeyValue conforming to the ++// "http.request.body.size" semantic conventions. It represents the size of the ++// request payload body in bytes. This is the number of bytes transferred ++// excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPRequestBodySize(val int) attribute.KeyValue { ++ return HTTPRequestBodySizeKey.Int(val) ++} ++ ++// HTTPResponseBodySize returns an attribute KeyValue conforming to the ++// "http.response.body.size" semantic conventions. It represents the size of ++// the response payload body in bytes. This is the number of bytes transferred ++// excluding headers and is often, but not always, present as the ++// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) ++// header. For requests using transport encoding, this should be the compressed ++// size. ++func HTTPResponseBodySize(val int) attribute.KeyValue { ++ return HTTPResponseBodySizeKey.Int(val) ++} ++ ++// Semantic convention describing per-message attributes populated on messaging ++// spans or links. ++const ( ++ // MessagingMessageIDKey is the attribute Key conforming to the ++ // "messaging.message.id" semantic conventions. It represents a value used ++ // by the messaging system as an identifier for the message, represented as ++ // a string. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '452a7c7c7c7048c2f887f61572b18fc2' ++ MessagingMessageIDKey = attribute.Key("messaging.message.id") ++ ++ // MessagingMessageConversationIDKey is the attribute Key conforming to the ++ // "messaging.message.conversation_id" semantic conventions. It represents ++ // the [conversation ID](#conversations) identifying the conversation to ++ // which the message belongs, represented as a string. Sometimes called ++ // "Correlation ID". ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyConversationID' ++ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") ++ ++ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to ++ // the "messaging.message.payload_size_bytes" semantic conventions. It ++ // represents the (uncompressed) size of the message payload in bytes. Also ++ // use this attribute if it is unknown whether the compressed or ++ // uncompressed payload size is reported. 
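// --- Illustrative usage sketch; NOT part of the vendored patch content above or below. ---
// It records the http.request.* / http.response.* attributes generated above on a client
// span. The tracer name, span name, and byte counts are made-up placeholders; real
// instrumentation would take the sizes from Content-Length headers or transferred bytes.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("example/http-client")

	// Attributes known up front can be supplied when the span starts.
	_, span := tracer.Start(context.Background(), "HTTP POST",
		trace.WithAttributes(
			semconv.HTTPRequestMethodOriginal("PoSt"), // method exactly as sent on the wire
			semconv.HTTPRequestBodySize(3495),
		),
	)
	// The response size is only known later, so it is set afterwards.
	span.SetAttributes(semconv.HTTPResponseBodySize(1024))
	span.End()
}
// --- End of illustrative sketch. ---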
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2738 ++ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") ++ ++ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key ++ // conforming to the "messaging.message.payload_compressed_size_bytes" ++ // semantic conventions. It represents the compressed size of the message ++ // payload in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2048 ++ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ++) ++ ++// MessagingMessageID returns an attribute KeyValue conforming to the ++// "messaging.message.id" semantic conventions. It represents a value used by ++// the messaging system as an identifier for the message, represented as a ++// string. ++func MessagingMessageID(val string) attribute.KeyValue { ++ return MessagingMessageIDKey.String(val) ++} ++ ++// MessagingMessageConversationID returns an attribute KeyValue conforming ++// to the "messaging.message.conversation_id" semantic conventions. It ++// represents the [conversation ID](#conversations) identifying the ++// conversation to which the message belongs, represented as a string. ++// Sometimes called "Correlation ID". ++func MessagingMessageConversationID(val string) attribute.KeyValue { ++ return MessagingMessageConversationIDKey.String(val) ++} ++ ++// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming ++// to the "messaging.message.payload_size_bytes" semantic conventions. It ++// represents the (uncompressed) size of the message payload in bytes. Also use ++// this attribute if it is unknown whether the compressed or uncompressed ++// payload size is reported. ++func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadSizeBytesKey.Int(val) ++} ++ ++// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue ++// conforming to the "messaging.message.payload_compressed_size_bytes" semantic ++// conventions. It represents the compressed size of the message payload in ++// bytes. ++func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { ++ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) ++} ++ ++// Semantic convention for attributes that describe messaging destination on ++// broker ++const ( ++ // MessagingDestinationNameKey is the attribute Key conforming to the ++ // "messaging.destination.name" semantic conventions. It represents the ++ // message destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MyQueue', 'MyTopic' ++ // Note: Destination name SHOULD uniquely identify a specific queue, topic ++ // or other entity within the broker. If ++ // the broker does not have such notion, the destination name SHOULD ++ // uniquely identify the broker. ++ MessagingDestinationNameKey = attribute.Key("messaging.destination.name") ++ ++ // MessagingDestinationTemplateKey is the attribute Key conforming to the ++ // "messaging.destination.template" semantic conventions. It represents the ++ // low cardinality representation of the messaging destination name ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/customers/{customerID}' ++ // Note: Destination names could be constructed from templates. 
An example ++ // would be a destination name involving a user name or product id. ++ // Although the destination name in this case is of high cardinality, the ++ // underlying template is of low cardinality and can be effectively used ++ // for grouping and aggregation. ++ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") ++ ++ // MessagingDestinationTemporaryKey is the attribute Key conforming to the ++ // "messaging.destination.temporary" semantic conventions. It represents a ++ // boolean that is true if the message destination is temporary and might ++ // not exist anymore after messages are processed. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") ++ ++ // MessagingDestinationAnonymousKey is the attribute Key conforming to the ++ // "messaging.destination.anonymous" semantic conventions. It represents a ++ // boolean that is true if the message destination is anonymous (could be ++ // unnamed or have auto-generated name). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ++) ++ ++// MessagingDestinationName returns an attribute KeyValue conforming to the ++// "messaging.destination.name" semantic conventions. It represents the message ++// destination name ++func MessagingDestinationName(val string) attribute.KeyValue { ++ return MessagingDestinationNameKey.String(val) ++} ++ ++// MessagingDestinationTemplate returns an attribute KeyValue conforming to ++// the "messaging.destination.template" semantic conventions. It represents the ++// low cardinality representation of the messaging destination name ++func MessagingDestinationTemplate(val string) attribute.KeyValue { ++ return MessagingDestinationTemplateKey.String(val) ++} ++ ++// MessagingDestinationTemporary returns an attribute KeyValue conforming to ++// the "messaging.destination.temporary" semantic conventions. It represents a ++// boolean that is true if the message destination is temporary and might not ++// exist anymore after messages are processed. ++func MessagingDestinationTemporary(val bool) attribute.KeyValue { ++ return MessagingDestinationTemporaryKey.Bool(val) ++} ++ ++// MessagingDestinationAnonymous returns an attribute KeyValue conforming to ++// the "messaging.destination.anonymous" semantic conventions. It represents a ++// boolean that is true if the message destination is anonymous (could be ++// unnamed or have auto-generated name). ++func MessagingDestinationAnonymous(val bool) attribute.KeyValue { ++ return MessagingDestinationAnonymousKey.Bool(val) ++} ++ ++// Attributes for RabbitMQ ++const ( ++ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key ++ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++ // conventions. It represents the rabbitMQ message routing key. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If not empty.) ++ // Stability: stable ++ // Examples: 'myKey' ++ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ++) ++ ++// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue ++// conforming to the "messaging.rabbitmq.destination.routing_key" semantic ++// conventions. It represents the rabbitMQ message routing key. 
++func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { ++ return MessagingRabbitmqDestinationRoutingKeyKey.String(val) ++} ++ ++// Attributes for Apache Kafka ++const ( ++ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the ++ // "messaging.kafka.message.key" semantic conventions. It represents the ++ // message keys in Kafka are used for grouping alike messages to ensure ++ // they're processed on the same partition. They differ from ++ // `messaging.message.id` in that they're not unique. If the key is `null`, ++ // the attribute MUST NOT be set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myKey' ++ // Note: If the key type is not string, it's string representation has to ++ // be supplied for the attribute. If the key has no unambiguous, canonical ++ // string form, don't include its value. ++ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") ++ ++ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the ++ // "messaging.kafka.consumer.group" semantic conventions. It represents the ++ // name of the Kafka Consumer Group that is handling the message. Only ++ // applies to consumers, not producers. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-group' ++ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") ++ ++ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to ++ // the "messaging.kafka.destination.partition" semantic conventions. It ++ // represents the partition the message is sent to. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 2 ++ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") ++ ++ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the ++ // "messaging.kafka.message.offset" semantic conventions. It represents the ++ // offset of a record in the corresponding Kafka partition. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") ++ ++ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the ++ // "messaging.kafka.message.tombstone" semantic conventions. It represents ++ // a boolean that is true if the message is a tombstone. ++ // ++ // Type: boolean ++ // RequirementLevel: ConditionallyRequired (If value is `true`. When ++ // missing, the value is assumed to be `false`.) ++ // Stability: stable ++ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ++) ++ ++// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the ++// "messaging.kafka.message.key" semantic conventions. It represents the ++// message keys in Kafka are used for grouping alike messages to ensure they're ++// processed on the same partition. They differ from `messaging.message.id` in ++// that they're not unique. If the key is `null`, the attribute MUST NOT be ++// set. ++func MessagingKafkaMessageKey(val string) attribute.KeyValue { ++ return MessagingKafkaMessageKeyKey.String(val) ++} ++ ++// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to ++// the "messaging.kafka.consumer.group" semantic conventions. It represents the ++// name of the Kafka Consumer Group that is handling the message. Only applies ++// to consumers, not producers. 
++func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { ++ return MessagingKafkaConsumerGroupKey.String(val) ++} ++ ++// MessagingKafkaDestinationPartition returns an attribute KeyValue ++// conforming to the "messaging.kafka.destination.partition" semantic ++// conventions. It represents the partition the message is sent to. ++func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { ++ return MessagingKafkaDestinationPartitionKey.Int(val) ++} ++ ++// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to ++// the "messaging.kafka.message.offset" semantic conventions. It represents the ++// offset of a record in the corresponding Kafka partition. ++func MessagingKafkaMessageOffset(val int) attribute.KeyValue { ++ return MessagingKafkaMessageOffsetKey.Int(val) ++} ++ ++// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming ++// to the "messaging.kafka.message.tombstone" semantic conventions. It ++// represents a boolean that is true if the message is a tombstone. ++func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { ++ return MessagingKafkaMessageTombstoneKey.Bool(val) ++} ++ ++// Attributes for Apache RocketMQ ++const ( ++ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the ++ // "messaging.rocketmq.namespace" semantic conventions. It represents the ++ // namespace of RocketMQ resources, resources in different namespaces are ++ // individual. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myNamespace' ++ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") ++ ++ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.client_group" semantic conventions. It represents ++ // the name of the RocketMQ producer/consumer group that is handling the ++ // message. The client type is identified by the SpanKind. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myConsumerGroup' ++ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") ++ ++ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delivery_timestamp" ++ // semantic conventions. It represents the timestamp in milliseconds that ++ // the delay message is expected to be delivered to consumer. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delay time level is not specified.) ++ // Stability: stable ++ // Examples: 1665987217045 ++ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") ++ ++ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key ++ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++ // conventions. It represents the delay time level for delay message, which ++ // determines the message delay time. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the message type is delay ++ // and delivery timestamp is not specified.) ++ // Stability: stable ++ // Examples: 3 ++ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") ++ ++ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.group" semantic conventions. It represents ++ // the it is essential for FIFO message. 
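// --- Illustrative usage sketch; NOT part of the vendored patch content above or below. ---
// It tags a consumer-side span with the messaging.* and messaging.kafka.* helpers
// generated above. The topic, consumer group, partition, offset, and message id are
// placeholder values for the sketch.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func main() {
	tracer := otel.Tracer("example/kafka-consumer")
	_, span := tracer.Start(context.Background(), "orders process")
	defer span.End()

	span.SetAttributes(
		semconv.MessagingDestinationName("orders"),
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaDestinationPartition(2),
		semconv.MessagingKafkaMessageOffset(42),
		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
	)
}
// --- End of illustrative sketch. ---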
Messages that belong to the same ++ // message group are always processed one by one within the same consumer ++ // group. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) ++ // Stability: stable ++ // Examples: 'myMessageGroup' ++ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") ++ ++ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.type" semantic conventions. It represents ++ // the type of message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") ++ ++ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.tag" semantic conventions. It represents the ++ // secondary classifier of message besides topic. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'tagA' ++ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") ++ ++ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the ++ // "messaging.rocketmq.message.keys" semantic conventions. It represents ++ // the key(s) of message, another way to mark message besides message id. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'keyA', 'keyB' ++ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") ++ ++ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to ++ // the "messaging.rocketmq.consumption_model" semantic conventions. It ++ // represents the model of message consumption. This only applies to ++ // consumer spans. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ++) ++ ++var ( ++ // Normal message ++ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") ++ // FIFO message ++ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") ++ // Delay message ++ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") ++ // Transaction message ++ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ++) ++ ++var ( ++ // Clustering consumption model ++ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") ++ // Broadcasting consumption model ++ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ++) ++ ++// MessagingRocketmqNamespace returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.namespace" semantic conventions. It represents the ++// namespace of RocketMQ resources, resources in different namespaces are ++// individual. ++func MessagingRocketmqNamespace(val string) attribute.KeyValue { ++ return MessagingRocketmqNamespaceKey.String(val) ++} ++ ++// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.client_group" semantic conventions. It represents ++// the name of the RocketMQ producer/consumer group that is handling the ++// message. The client type is identified by the SpanKind. 
++func MessagingRocketmqClientGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqClientGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic ++// conventions. It represents the timestamp in milliseconds that the delay ++// message is expected to be delivered to consumer. ++func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) ++} ++ ++// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue ++// conforming to the "messaging.rocketmq.message.delay_time_level" semantic ++// conventions. It represents the delay time level for delay message, which ++// determines the message delay time. ++func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { ++ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) ++} ++ ++// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.group" semantic conventions. It represents ++// the it is essential for FIFO message. Messages that belong to the same ++// message group are always processed one by one within the same consumer ++// group. ++func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageGroupKey.String(val) ++} ++ ++// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.tag" semantic conventions. It represents the ++// secondary classifier of message besides topic. ++func MessagingRocketmqMessageTag(val string) attribute.KeyValue { ++ return MessagingRocketmqMessageTagKey.String(val) ++} ++ ++// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to ++// the "messaging.rocketmq.message.keys" semantic conventions. It represents ++// the key(s) of message, another way to mark message besides message id. ++func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { ++ return MessagingRocketmqMessageKeysKey.StringSlice(val) ++} ++ ++// Attributes describing URL. ++const ( ++ // URLSchemeKey is the attribute Key conforming to the "url.scheme" ++ // semantic conventions. It represents the [URI ++ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component ++ // identifying the used protocol. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'https', 'ftp', 'telnet' ++ URLSchemeKey = attribute.Key("url.scheme") ++ ++ // URLFullKey is the attribute Key conforming to the "url.full" semantic ++ // conventions. It represents the absolute URL describing a network ++ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', ++ // '//localhost' ++ // Note: For network calls, URL usually has ++ // `scheme://host[:port][path][?query][#fragment]` format, where the ++ // fragment is not transmitted over HTTP, but if it is known, it should be ++ // included nevertheless. ++ // `url.full` MUST NOT contain credentials passed via URL in form of ++ // `https://username:password@www.example.com/`. In such case username and ++ // password should be redacted and attribute's value should be ++ // `https://REDACTED:REDACTED@www.example.com/`. 
++ // `url.full` SHOULD capture the absolute URL when it is available (or can ++ // be reconstructed) and SHOULD NOT be validated or modified except for ++ // sanitizing purposes. ++ URLFullKey = attribute.Key("url.full") ++ ++ // URLPathKey is the attribute Key conforming to the "url.path" semantic ++ // conventions. It represents the [URI ++ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/search' ++ // Note: When missing, the value is assumed to be `/` ++ URLPathKey = attribute.Key("url.path") ++ ++ // URLQueryKey is the attribute Key conforming to the "url.query" semantic ++ // conventions. It represents the [URI ++ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'q=OpenTelemetry' ++ // Note: Sensitive content provided in query string SHOULD be scrubbed when ++ // instrumentations can identify it. ++ URLQueryKey = attribute.Key("url.query") ++ ++ // URLFragmentKey is the attribute Key conforming to the "url.fragment" ++ // semantic conventions. It represents the [URI ++ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'SemConv' ++ URLFragmentKey = attribute.Key("url.fragment") ++) ++ ++// URLScheme returns an attribute KeyValue conforming to the "url.scheme" ++// semantic conventions. It represents the [URI ++// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component ++// identifying the used protocol. ++func URLScheme(val string) attribute.KeyValue { ++ return URLSchemeKey.String(val) ++} ++ ++// URLFull returns an attribute KeyValue conforming to the "url.full" ++// semantic conventions. It represents the absolute URL describing a network ++// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) ++func URLFull(val string) attribute.KeyValue { ++ return URLFullKey.String(val) ++} ++ ++// URLPath returns an attribute KeyValue conforming to the "url.path" ++// semantic conventions. It represents the [URI ++// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component ++func URLPath(val string) attribute.KeyValue { ++ return URLPathKey.String(val) ++} ++ ++// URLQuery returns an attribute KeyValue conforming to the "url.query" ++// semantic conventions. It represents the [URI ++// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component ++func URLQuery(val string) attribute.KeyValue { ++ return URLQueryKey.String(val) ++} ++ ++// URLFragment returns an attribute KeyValue conforming to the ++// "url.fragment" semantic conventions. It represents the [URI ++// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component ++func URLFragment(val string) attribute.KeyValue { ++ return URLFragmentKey.String(val) ++} ++ ++// Describes user-agent attributes. ++const ( ++ // UserAgentOriginalKey is the attribute Key conforming to the ++ // "user_agent.original" semantic conventions. It represents the value of ++ // the [HTTP ++ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++ // header sent by the client. 
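// --- Illustrative usage sketch; NOT part of the vendored patch content above or below. ---
// It builds the url.* attributes generated above and prints their canonical keys and
// values. The URL is the example value from the conventions; per the url.full note,
// any credentials must be redacted before the value is recorded.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.URLScheme("https"),
		semconv.URLFull("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
		semconv.URLPath("/search"),
		semconv.URLQuery("q=OpenTelemetry"),
		semconv.URLFragment("SemConv"),
	}
	for _, kv := range attrs {
		// Emit renders the attribute value as a string for display.
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
// --- End of illustrative sketch. ---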
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' ++ UserAgentOriginalKey = attribute.Key("user_agent.original") ++) ++ ++// UserAgentOriginal returns an attribute KeyValue conforming to the ++// "user_agent.original" semantic conventions. It represents the value of the ++// [HTTP ++// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) ++// header sent by the client. ++func UserAgentOriginal(val string) attribute.KeyValue { ++ return UserAgentOriginalKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +new file mode 100644 +index 00000000000..7cf424855e9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package semconv implements OpenTelemetry semantic conventions. ++// ++// OpenTelemetry semantic conventions are agreed standardized naming ++// patterns for OpenTelemetry things. This package represents the conventions ++// as of the v1.21.0 version of the OpenTelemetry specification. ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go +new file mode 100644 +index 00000000000..30ae34fe478 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go +@@ -0,0 +1,199 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// This semantic convention defines the attributes used to represent a feature ++// flag evaluation as an event. ++const ( ++ // FeatureFlagKeyKey is the attribute Key conforming to the ++ // "feature_flag.key" semantic conventions. It represents the unique ++ // identifier of the feature flag. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'logo-color' ++ FeatureFlagKeyKey = attribute.Key("feature_flag.key") ++ ++ // FeatureFlagProviderNameKey is the attribute Key conforming to the ++ // "feature_flag.provider_name" semantic conventions. 
It represents the ++ // name of the service provider that performs the flag evaluation. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'Flag Manager' ++ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") ++ ++ // FeatureFlagVariantKey is the attribute Key conforming to the ++ // "feature_flag.variant" semantic conventions. It represents the sHOULD be ++ // a semantic identifier for a value. If one is unavailable, a stringified ++ // version of the value can be used. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'red', 'true', 'on' ++ // Note: A semantic identifier, commonly referred to as a variant, provides ++ // a means ++ // for referring to a value without including the value itself. This can ++ // provide additional context for understanding the meaning behind a value. ++ // For example, the variant `red` maybe be used for the value `#c05543`. ++ // ++ // A stringified version of the value can be used in situations where a ++ // semantic identifier is unavailable. String representation of the value ++ // should be determined by the implementer. ++ FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ++) ++ ++// FeatureFlagKey returns an attribute KeyValue conforming to the ++// "feature_flag.key" semantic conventions. It represents the unique identifier ++// of the feature flag. ++func FeatureFlagKey(val string) attribute.KeyValue { ++ return FeatureFlagKeyKey.String(val) ++} ++ ++// FeatureFlagProviderName returns an attribute KeyValue conforming to the ++// "feature_flag.provider_name" semantic conventions. It represents the name of ++// the service provider that performs the flag evaluation. ++func FeatureFlagProviderName(val string) attribute.KeyValue { ++ return FeatureFlagProviderNameKey.String(val) ++} ++ ++// FeatureFlagVariant returns an attribute KeyValue conforming to the ++// "feature_flag.variant" semantic conventions. It represents the sHOULD be a ++// semantic identifier for a value. If one is unavailable, a stringified ++// version of the value can be used. ++func FeatureFlagVariant(val string) attribute.KeyValue { ++ return FeatureFlagVariantKey.String(val) ++} ++ ++// RPC received/sent message. ++const ( ++ // MessageTypeKey is the attribute Key conforming to the "message.type" ++ // semantic conventions. It represents the whether this is a received or ++ // sent message. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageTypeKey = attribute.Key("message.type") ++ ++ // MessageIDKey is the attribute Key conforming to the "message.id" ++ // semantic conventions. It represents the mUST be calculated as two ++ // different counters starting from `1` one for sent messages and one for ++ // received message. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This way we guarantee that the values will be consistent between ++ // different implementations. ++ MessageIDKey = attribute.Key("message.id") ++ ++ // MessageCompressedSizeKey is the attribute Key conforming to the ++ // "message.compressed_size" semantic conventions. It represents the ++ // compressed size of the message in bytes. 
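// --- Illustrative usage sketch; NOT part of the vendored patch content above or below. ---
// It records a feature-flag evaluation as a span event using the feature_flag.* helpers
// generated above, which is the use case these conventions describe. The tracer name,
// span name, flag key, provider, and variant are placeholder values.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("example/flags")
	_, span := tracer.Start(context.Background(), "render-page")
	defer span.End()

	// The evaluation is attached as an event rather than as span attributes.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
// --- End of illustrative sketch. ---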
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageCompressedSizeKey = attribute.Key("message.compressed_size") ++ ++ // MessageUncompressedSizeKey is the attribute Key conforming to the ++ // "message.uncompressed_size" semantic conventions. It represents the ++ // uncompressed size of the message in bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ++) ++ ++var ( ++ // sent ++ MessageTypeSent = MessageTypeKey.String("SENT") ++ // received ++ MessageTypeReceived = MessageTypeKey.String("RECEIVED") ++) ++ ++// MessageID returns an attribute KeyValue conforming to the "message.id" ++// semantic conventions. It represents the mUST be calculated as two different ++// counters starting from `1` one for sent messages and one for received ++// message. ++func MessageID(val int) attribute.KeyValue { ++ return MessageIDKey.Int(val) ++} ++ ++// MessageCompressedSize returns an attribute KeyValue conforming to the ++// "message.compressed_size" semantic conventions. It represents the compressed ++// size of the message in bytes. ++func MessageCompressedSize(val int) attribute.KeyValue { ++ return MessageCompressedSizeKey.Int(val) ++} ++ ++// MessageUncompressedSize returns an attribute KeyValue conforming to the ++// "message.uncompressed_size" semantic conventions. It represents the ++// uncompressed size of the message in bytes. ++func MessageUncompressedSize(val int) attribute.KeyValue { ++ return MessageUncompressedSizeKey.Int(val) ++} ++ ++// The attributes used to report a single exception associated with a span. ++const ( ++ // ExceptionEscapedKey is the attribute Key conforming to the ++ // "exception.escaped" semantic conventions. It represents the sHOULD be ++ // set to true if the exception event is recorded at a point where it is ++ // known that the exception is escaping the scope of the span. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: An exception is considered to have escaped (or left) the scope of ++ // a span, ++ // if that span is ended while the exception is still logically "in ++ // flight". ++ // This may be actually "in flight" in some languages (e.g. if the ++ // exception ++ // is passed to a Context manager's `__exit__` method in Python) but will ++ // usually be caught at the point of recording the exception in most ++ // languages. ++ // ++ // It is usually not possible to determine at the point where an exception ++ // is thrown ++ // whether it will escape the scope of a span. ++ // However, it is trivial to know that an exception ++ // will escape, if one checks for an active exception just before ending ++ // the span, ++ // as done in the [example above](#recording-an-exception). ++ // ++ // It follows that an exception may still escape the scope of the span ++ // even if the `exception.escaped` attribute was not set or set to false, ++ // since the event might have been recorded at a time where it was not ++ // clear whether the exception will escape. ++ ExceptionEscapedKey = attribute.Key("exception.escaped") ++) ++ ++// ExceptionEscaped returns an attribute KeyValue conforming to the ++// "exception.escaped" semantic conventions. It represents the sHOULD be set to ++// true if the exception event is recorded at a point where it is known that ++// the exception is escaping the scope of the span. 
++func ExceptionEscaped(val bool) attribute.KeyValue { ++ return ExceptionEscapedKey.Bool(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go +new file mode 100644 +index 00000000000..93d3c1760c9 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++const ( ++ // ExceptionEventName is the name of the Span event representing an exception. ++ ExceptionEventName = "exception" ++) +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go +new file mode 100644 +index 00000000000..b6d8935cf97 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go +@@ -0,0 +1,2310 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The web browser in which the application represented by the resource is ++// running. The `browser.*` attributes MUST be used only for resources that ++// represent applications running in a web browser (regardless of whether ++// running on a mobile or desktop device). ++const ( ++ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" ++ // semantic conventions. It represents the array of brand name and version ++ // separated by a space ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.brands`). ++ BrowserBrandsKey = attribute.Key("browser.brands") ++ ++ // BrowserPlatformKey is the attribute Key conforming to the ++ // "browser.platform" semantic conventions. 
It represents the platform on ++ // which the browser is running ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Windows', 'macOS', 'Android' ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.platform`). If unavailable, the legacy ++ // `navigator.platform` API SHOULD NOT be used instead and this attribute ++ // SHOULD be left unset in order for the values to be consistent. ++ // The list of possible values is defined in the [W3C User-Agent Client ++ // Hints ++ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). ++ // Note that some (but not all) of these values can overlap with values in ++ // the [`os.type` and `os.name` attributes](./os.md). However, for ++ // consistency, the values in the `browser.platform` attribute should ++ // capture the exact value that the user agent provides. ++ BrowserPlatformKey = attribute.Key("browser.platform") ++ ++ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" ++ // semantic conventions. It represents a boolean that is true if the ++ // browser is running on a mobile device ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: This value is intended to be taken from the [UA client hints ++ // API](https://wicg.github.io/ua-client-hints/#interface) ++ // (`navigator.userAgentData.mobile`). If unavailable, this attribute ++ // SHOULD be left unset. ++ BrowserMobileKey = attribute.Key("browser.mobile") ++ ++ // BrowserLanguageKey is the attribute Key conforming to the ++ // "browser.language" semantic conventions. It represents the preferred ++ // language of the user using the browser ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'en', 'en-US', 'fr', 'fr-FR' ++ // Note: This value is intended to be taken from the Navigator API ++ // `navigator.language`. ++ BrowserLanguageKey = attribute.Key("browser.language") ++) ++ ++// BrowserBrands returns an attribute KeyValue conforming to the ++// "browser.brands" semantic conventions. It represents the array of brand name ++// and version separated by a space ++func BrowserBrands(val ...string) attribute.KeyValue { ++ return BrowserBrandsKey.StringSlice(val) ++} ++ ++// BrowserPlatform returns an attribute KeyValue conforming to the ++// "browser.platform" semantic conventions. It represents the platform on which ++// the browser is running ++func BrowserPlatform(val string) attribute.KeyValue { ++ return BrowserPlatformKey.String(val) ++} ++ ++// BrowserMobile returns an attribute KeyValue conforming to the ++// "browser.mobile" semantic conventions. It represents a boolean that is true ++// if the browser is running on a mobile device ++func BrowserMobile(val bool) attribute.KeyValue { ++ return BrowserMobileKey.Bool(val) ++} ++ ++// BrowserLanguage returns an attribute KeyValue conforming to the ++// "browser.language" semantic conventions. It represents the preferred ++// language of the user using the browser ++func BrowserLanguage(val string) attribute.KeyValue { ++ return BrowserLanguageKey.String(val) ++} ++ ++// A cloud environment (e.g. GCP, Azure, AWS) ++const ( ++ // CloudProviderKey is the attribute Key conforming to the "cloud.provider" ++ // semantic conventions. It represents the name of the cloud provider. 
++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ CloudProviderKey = attribute.Key("cloud.provider") ++ ++ // CloudAccountIDKey is the attribute Key conforming to the ++ // "cloud.account.id" semantic conventions. It represents the cloud account ++ // ID the resource is assigned to. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '111111111111', 'opentelemetry' ++ CloudAccountIDKey = attribute.Key("cloud.account.id") ++ ++ // CloudRegionKey is the attribute Key conforming to the "cloud.region" ++ // semantic conventions. It represents the geographical region the resource ++ // is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-central1', 'us-east-1' ++ // Note: Refer to your provider's docs to see the available regions, for ++ // example [Alibaba Cloud ++ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS ++ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), ++ // [Azure ++ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), ++ // [Google Cloud regions](https://cloud.google.com/about/locations), or ++ // [Tencent Cloud ++ // regions](https://www.tencentcloud.com/document/product/213/6091). ++ CloudRegionKey = attribute.Key("cloud.region") ++ ++ // CloudResourceIDKey is the attribute Key conforming to the ++ // "cloud.resource_id" semantic conventions. It represents the cloud ++ // provider-specific native identifier of the monitored cloud resource ++ // (e.g. an ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) ++ // on AWS, a [fully qualified resource ++ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // on Azure, a [full resource ++ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) ++ // on GCP) ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', ++ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', ++ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' ++ // Note: On some cloud providers, it may not be possible to determine the ++ // full ID at startup, ++ // so it may be necessary to set `cloud.resource_id` as a span attribute ++ // instead. ++ // ++ // The exact value to use for `cloud.resource_id` depends on the cloud ++ // provider. ++ // The following well-known definitions MUST be used if you set this ++ // attribute and they apply: ++ // ++ // * **AWS Lambda:** The function ++ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). ++ // Take care not to use the "invoked ARN" directly but replace any ++ // [alias ++ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) ++ // with the resolved function version, as the same runtime instance may ++ // be invokable with ++ // multiple different aliases. ++ // * **GCP:** The [URI of the ++ // resource](https://cloud.google.com/iam/docs/full-resource-names) ++ // * **Azure:** The [Fully Qualified Resource ++ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++ // of the invoked function, ++ // *not* the function app, having the form ++ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider. ++ CloudResourceIDKey = attribute.Key("cloud.resource_id") ++ ++ // CloudAvailabilityZoneKey is the attribute Key conforming to the ++ // "cloud.availability_zone" semantic conventions. It represents the cloud ++ // regions often have multiple, isolated locations known as zones to ++ // increase availability. Availability zone represents the zone where the ++ // resource is running. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-east-1c' ++ // Note: Availability zones are called "zones" on Alibaba Cloud and Google ++ // Cloud. ++ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") ++ ++ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" ++ // semantic conventions. It represents the cloud platform in use. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The prefix of the service SHOULD match the one specified in ++ // `cloud.provider`. ++ CloudPlatformKey = attribute.Key("cloud.platform") ++) ++ ++var ( ++ // Alibaba Cloud ++ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ CloudProviderAWS = CloudProviderKey.String("aws") ++ // Microsoft Azure ++ CloudProviderAzure = CloudProviderKey.String("azure") ++ // Google Cloud Platform ++ CloudProviderGCP = CloudProviderKey.String("gcp") ++ // Heroku Platform as a Service ++ CloudProviderHeroku = CloudProviderKey.String("heroku") ++ // IBM Cloud ++ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") ++ // Tencent Cloud ++ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ++) ++ ++var ( ++ // Alibaba Cloud Elastic Compute Service ++ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") ++ // Alibaba Cloud Function Compute ++ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") ++ // Red Hat OpenShift on Alibaba Cloud ++ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") ++ // AWS Elastic Compute Cloud ++ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") ++ // AWS Elastic Container Service ++ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") ++ // AWS Elastic Kubernetes Service ++ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") ++ // AWS Lambda ++ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") ++ // AWS Elastic Beanstalk ++ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") ++ // AWS App Runner ++ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") ++ // Red Hat OpenShift on AWS (ROSA) ++ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") ++ // Azure Virtual Machines ++ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") ++ // Azure Container Instances ++ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") ++ // Azure Kubernetes Service ++ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") ++ // Azure Functions ++ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") ++ // Azure App Service ++ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") ++ // Azure Red Hat OpenShift ++ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") ++ // Google Bare Metal Solution (BMS) ++ 
CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") ++ // Google Cloud Compute Engine (GCE) ++ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") ++ // Google Cloud Run ++ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") ++ // Google Cloud Kubernetes Engine (GKE) ++ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") ++ // Google Cloud Functions (GCF) ++ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") ++ // Google Cloud App Engine (GAE) ++ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ++ // Red Hat OpenShift on Google Cloud ++ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") ++ // Red Hat OpenShift on IBM Cloud ++ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") ++ // Tencent Cloud Cloud Virtual Machine (CVM) ++ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") ++ // Tencent Cloud Elastic Kubernetes Service (EKS) ++ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") ++ // Tencent Cloud Serverless Cloud Function (SCF) ++ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ++) ++ ++// CloudAccountID returns an attribute KeyValue conforming to the ++// "cloud.account.id" semantic conventions. It represents the cloud account ID ++// the resource is assigned to. ++func CloudAccountID(val string) attribute.KeyValue { ++ return CloudAccountIDKey.String(val) ++} ++ ++// CloudRegion returns an attribute KeyValue conforming to the ++// "cloud.region" semantic conventions. It represents the geographical region ++// the resource is running. ++func CloudRegion(val string) attribute.KeyValue { ++ return CloudRegionKey.String(val) ++} ++ ++// CloudResourceID returns an attribute KeyValue conforming to the ++// "cloud.resource_id" semantic conventions. It represents the cloud ++// provider-specific native identifier of the monitored cloud resource (e.g. an ++// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) ++// on AWS, a [fully qualified resource ++// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) ++// on Azure, a [full resource ++// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) ++// on GCP) ++func CloudResourceID(val string) attribute.KeyValue { ++ return CloudResourceIDKey.String(val) ++} ++ ++// CloudAvailabilityZone returns an attribute KeyValue conforming to the ++// "cloud.availability_zone" semantic conventions. It represents the cloud ++// regions often have multiple, isolated locations known as zones to increase ++// availability. Availability zone represents the zone where the resource is ++// running. ++func CloudAvailabilityZone(val string) attribute.KeyValue { ++ return CloudAvailabilityZoneKey.String(val) ++} ++ ++// Resources used by AWS Elastic Container Service (ECS). ++const ( ++ // AWSECSContainerARNKey is the attribute Key conforming to the ++ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++ // Resource Name (ARN) of an [ECS container ++ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' ++ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") ++ ++ // AWSECSClusterARNKey is the attribute Key conforming to the ++ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an ++ // [ECS ++ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") ++ ++ // AWSECSLaunchtypeKey is the attribute Key conforming to the ++ // "aws.ecs.launchtype" semantic conventions. It represents the [launch ++ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) ++ // for an ECS task. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") ++ ++ // AWSECSTaskARNKey is the attribute Key conforming to the ++ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an ++ // [ECS task ++ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") ++ ++ // AWSECSTaskFamilyKey is the attribute Key conforming to the ++ // "aws.ecs.task.family" semantic conventions. It represents the task ++ // definition family this task definition is a member of. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-family' ++ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") ++ ++ // AWSECSTaskRevisionKey is the attribute Key conforming to the ++ // "aws.ecs.task.revision" semantic conventions. It represents the revision ++ // for this task definition. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '8', '26' ++ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ++) ++ ++var ( ++ // ec2 ++ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") ++ // fargate ++ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ++) ++ ++// AWSECSContainerARN returns an attribute KeyValue conforming to the ++// "aws.ecs.container.arn" semantic conventions. It represents the Amazon ++// Resource Name (ARN) of an [ECS container ++// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). ++func AWSECSContainerARN(val string) attribute.KeyValue { ++ return AWSECSContainerARNKey.String(val) ++} ++ ++// AWSECSClusterARN returns an attribute KeyValue conforming to the ++// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS ++// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). ++func AWSECSClusterARN(val string) attribute.KeyValue { ++ return AWSECSClusterARNKey.String(val) ++} ++ ++// AWSECSTaskARN returns an attribute KeyValue conforming to the ++// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS ++// task ++// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
++func AWSECSTaskARN(val string) attribute.KeyValue { ++ return AWSECSTaskARNKey.String(val) ++} ++ ++// AWSECSTaskFamily returns an attribute KeyValue conforming to the ++// "aws.ecs.task.family" semantic conventions. It represents the task ++// definition family this task definition is a member of. ++func AWSECSTaskFamily(val string) attribute.KeyValue { ++ return AWSECSTaskFamilyKey.String(val) ++} ++ ++// AWSECSTaskRevision returns an attribute KeyValue conforming to the ++// "aws.ecs.task.revision" semantic conventions. It represents the revision for ++// this task definition. ++func AWSECSTaskRevision(val string) attribute.KeyValue { ++ return AWSECSTaskRevisionKey.String(val) ++} ++ ++// Resources used by AWS Elastic Kubernetes Service (EKS). ++const ( ++ // AWSEKSClusterARNKey is the attribute Key conforming to the ++ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an ++ // EKS cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' ++ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ++) ++ ++// AWSEKSClusterARN returns an attribute KeyValue conforming to the ++// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS ++// cluster. ++func AWSEKSClusterARN(val string) attribute.KeyValue { ++ return AWSEKSClusterARNKey.String(val) ++} ++ ++// Resources specific to Amazon Web Services. ++const ( ++ // AWSLogGroupNamesKey is the attribute Key conforming to the ++ // "aws.log.group.names" semantic conventions. It represents the name(s) of ++ // the AWS log group(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/aws/lambda/my-function', 'opentelemetry-service' ++ // Note: Multiple log groups must be supported for cases like ++ // multi-container applications, where a single application has sidecar ++ // containers, and each write to their own log group. ++ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") ++ ++ // AWSLogGroupARNsKey is the attribute Key conforming to the ++ // "aws.log.group.arns" semantic conventions. It represents the Amazon ++ // Resource Name(s) (ARN) of the AWS log group(s). ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' ++ // Note: See the [log group ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") ++ ++ // AWSLogStreamNamesKey is the attribute Key conforming to the ++ // "aws.log.stream.names" semantic conventions. It represents the name(s) ++ // of the AWS log stream(s) an application is writing to. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") ++ ++ // AWSLogStreamARNsKey is the attribute Key conforming to the ++ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of ++ // the AWS log stream(s). 
++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' ++ // Note: See the [log stream ARN format ++ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). ++ // One log group can contain several log streams, so these ARNs necessarily ++ // identify both a log group and a log stream. ++ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ++) ++ ++// AWSLogGroupNames returns an attribute KeyValue conforming to the ++// "aws.log.group.names" semantic conventions. It represents the name(s) of the ++// AWS log group(s) an application is writing to. ++func AWSLogGroupNames(val ...string) attribute.KeyValue { ++ return AWSLogGroupNamesKey.StringSlice(val) ++} ++ ++// AWSLogGroupARNs returns an attribute KeyValue conforming to the ++// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource ++// Name(s) (ARN) of the AWS log group(s). ++func AWSLogGroupARNs(val ...string) attribute.KeyValue { ++ return AWSLogGroupARNsKey.StringSlice(val) ++} ++ ++// AWSLogStreamNames returns an attribute KeyValue conforming to the ++// "aws.log.stream.names" semantic conventions. It represents the name(s) of ++// the AWS log stream(s) an application is writing to. ++func AWSLogStreamNames(val ...string) attribute.KeyValue { ++ return AWSLogStreamNamesKey.StringSlice(val) ++} ++ ++// AWSLogStreamARNs returns an attribute KeyValue conforming to the ++// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the ++// AWS log stream(s). ++func AWSLogStreamARNs(val ...string) attribute.KeyValue { ++ return AWSLogStreamARNsKey.StringSlice(val) ++} ++ ++// Resource used by Google Cloud Run. ++const ( ++ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the ++ // "gcp.cloud_run.job.execution" semantic conventions. It represents the ++ // name of the Cloud Run ++ // [execution](https://cloud.google.com/run/docs/managing/job-executions) ++ // being run for the Job, as set by the ++ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++ // environment variable. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'job-name-xxxx', 'sample-job-mdw84' ++ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") ++ ++ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the ++ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the ++ // index for a task within an execution as provided by the ++ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++ // environment variable. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 1 ++ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") ++) ++ ++// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the ++// "gcp.cloud_run.job.execution" semantic conventions. It represents the name ++// of the Cloud Run ++// [execution](https://cloud.google.com/run/docs/managing/job-executions) being ++// run for the Job, as set by the ++// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++// environment variable. 
++func GCPCloudRunJobExecution(val string) attribute.KeyValue { ++ return GCPCloudRunJobExecutionKey.String(val) ++} ++ ++// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the ++// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index ++// for a task within an execution as provided by the ++// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) ++// environment variable. ++func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { ++ return GCPCloudRunJobTaskIndexKey.Int(val) ++} ++ ++// Resources used by Google Compute Engine (GCE). ++const ( ++ // GCPGceInstanceNameKey is the attribute Key conforming to the ++ // "gcp.gce.instance.name" semantic conventions. It represents the instance ++ // name of a GCE instance. This is the value provided by `host.name`, the ++ // visible name of the instance in the Cloud Console UI, and the prefix for ++ // the default hostname of the instance as defined by the [default internal ++ // DNS ++ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'instance-1', 'my-vm-name' ++ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") ++ ++ // GCPGceInstanceHostnameKey is the attribute Key conforming to the ++ // "gcp.gce.instance.hostname" semantic conventions. It represents the ++ // hostname of a GCE instance. This is the full value of the default or ++ // [custom ++ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-host1234.example.com', ++ // 'sample-vm.us-west1-b.c.my-project.internal' ++ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") ++) ++ ++// GCPGceInstanceName returns an attribute KeyValue conforming to the ++// "gcp.gce.instance.name" semantic conventions. It represents the instance ++// name of a GCE instance. This is the value provided by `host.name`, the ++// visible name of the instance in the Cloud Console UI, and the prefix for the ++// default hostname of the instance as defined by the [default internal DNS ++// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). ++func GCPGceInstanceName(val string) attribute.KeyValue { ++ return GCPGceInstanceNameKey.String(val) ++} ++ ++// GCPGceInstanceHostname returns an attribute KeyValue conforming to the ++// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname ++// of a GCE instance. This is the full value of the default or [custom ++// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). ++func GCPGceInstanceHostname(val string) attribute.KeyValue { ++ return GCPGceInstanceHostnameKey.String(val) ++} ++ ++// Heroku dyno metadata ++const ( ++ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the ++ // "heroku.release.creation_timestamp" semantic conventions. It represents ++ // the time and date the release was created ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2022-10-23T18:00:42Z' ++ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") ++ ++ // HerokuReleaseCommitKey is the attribute Key conforming to the ++ // "heroku.release.commit" semantic conventions. 
It represents the commit ++ // hash for the current release ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' ++ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") ++ ++ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" ++ // semantic conventions. It represents the unique identifier for the ++ // application ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' ++ HerokuAppIDKey = attribute.Key("heroku.app.id") ++) ++ ++// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming ++// to the "heroku.release.creation_timestamp" semantic conventions. It ++// represents the time and date the release was created ++func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { ++ return HerokuReleaseCreationTimestampKey.String(val) ++} ++ ++// HerokuReleaseCommit returns an attribute KeyValue conforming to the ++// "heroku.release.commit" semantic conventions. It represents the commit hash ++// for the current release ++func HerokuReleaseCommit(val string) attribute.KeyValue { ++ return HerokuReleaseCommitKey.String(val) ++} ++ ++// HerokuAppID returns an attribute KeyValue conforming to the ++// "heroku.app.id" semantic conventions. It represents the unique identifier ++// for the application ++func HerokuAppID(val string) attribute.KeyValue { ++ return HerokuAppIDKey.String(val) ++} ++ ++// A container instance. ++const ( ++ // ContainerNameKey is the attribute Key conforming to the "container.name" ++ // semantic conventions. It represents the container name used by container ++ // runtime. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-autoconf' ++ ContainerNameKey = attribute.Key("container.name") ++ ++ // ContainerIDKey is the attribute Key conforming to the "container.id" ++ // semantic conventions. It represents the container ID. Usually a UUID, as ++ // for example used to [identify Docker ++ // containers](https://docs.docker.com/engine/reference/run/#container-identification). ++ // The UUID might be abbreviated. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'a3bf90e006b2' ++ ContainerIDKey = attribute.Key("container.id") ++ ++ // ContainerRuntimeKey is the attribute Key conforming to the ++ // "container.runtime" semantic conventions. It represents the container ++ // runtime managing this container. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'docker', 'containerd', 'rkt' ++ ContainerRuntimeKey = attribute.Key("container.runtime") ++ ++ // ContainerImageNameKey is the attribute Key conforming to the ++ // "container.image.name" semantic conventions. It represents the name of ++ // the image the container was built on. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'gcr.io/opentelemetry/operator' ++ ContainerImageNameKey = attribute.Key("container.image.name") ++ ++ // ContainerImageTagKey is the attribute Key conforming to the ++ // "container.image.tag" semantic conventions. It represents the container ++ // image tag. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ ContainerImageTagKey = attribute.Key("container.image.tag") ++ ++ // ContainerImageIDKey is the attribute Key conforming to the ++ // "container.image.id" semantic conventions. It represents the runtime ++ // specific image identifier. Usually a hash algorithm followed by a UUID. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' ++ // Note: Docker defines a sha256 of the image id; `container.image.id` ++ // corresponds to the `Image` field from the Docker container inspect ++ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) ++ // endpoint. ++ // K8S defines a link to the container registry repository with digest ++ // `"imageID": "registry.azurecr.io ++ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. ++ // OCI defines a digest of manifest. ++ ContainerImageIDKey = attribute.Key("container.image.id") ++ ++ // ContainerCommandKey is the attribute Key conforming to the ++ // "container.command" semantic conventions. It represents the command used ++ // to run the container (i.e. the command name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol' ++ // Note: If using embedded credentials or sensitive data, it is recommended ++ // to remove them to prevent potential leakage. ++ ContainerCommandKey = attribute.Key("container.command") ++ ++ // ContainerCommandLineKey is the attribute Key conforming to the ++ // "container.command_line" semantic conventions. It represents the full ++ // command run by the container as a single string representing the full ++ // command. [2] ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol --config config.yaml' ++ ContainerCommandLineKey = attribute.Key("container.command_line") ++ ++ // ContainerCommandArgsKey is the attribute Key conforming to the ++ // "container.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) run by the ++ // container. [2] ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'otelcontribcol, --config, config.yaml' ++ ContainerCommandArgsKey = attribute.Key("container.command_args") ++) ++ ++// ContainerName returns an attribute KeyValue conforming to the ++// "container.name" semantic conventions. It represents the container name used ++// by container runtime. ++func ContainerName(val string) attribute.KeyValue { ++ return ContainerNameKey.String(val) ++} ++ ++// ContainerID returns an attribute KeyValue conforming to the ++// "container.id" semantic conventions. It represents the container ID. Usually ++// a UUID, as for example used to [identify Docker ++// containers](https://docs.docker.com/engine/reference/run/#container-identification). ++// The UUID might be abbreviated. ++func ContainerID(val string) attribute.KeyValue { ++ return ContainerIDKey.String(val) ++} ++ ++// ContainerRuntime returns an attribute KeyValue conforming to the ++// "container.runtime" semantic conventions. It represents the container ++// runtime managing this container. 
++func ContainerRuntime(val string) attribute.KeyValue { ++ return ContainerRuntimeKey.String(val) ++} ++ ++// ContainerImageName returns an attribute KeyValue conforming to the ++// "container.image.name" semantic conventions. It represents the name of the ++// image the container was built on. ++func ContainerImageName(val string) attribute.KeyValue { ++ return ContainerImageNameKey.String(val) ++} ++ ++// ContainerImageTag returns an attribute KeyValue conforming to the ++// "container.image.tag" semantic conventions. It represents the container ++// image tag. ++func ContainerImageTag(val string) attribute.KeyValue { ++ return ContainerImageTagKey.String(val) ++} ++ ++// ContainerImageID returns an attribute KeyValue conforming to the ++// "container.image.id" semantic conventions. It represents the runtime ++// specific image identifier. Usually a hash algorithm followed by a UUID. ++func ContainerImageID(val string) attribute.KeyValue { ++ return ContainerImageIDKey.String(val) ++} ++ ++// ContainerCommand returns an attribute KeyValue conforming to the ++// "container.command" semantic conventions. It represents the command used to ++// run the container (i.e. the command name). ++func ContainerCommand(val string) attribute.KeyValue { ++ return ContainerCommandKey.String(val) ++} ++ ++// ContainerCommandLine returns an attribute KeyValue conforming to the ++// "container.command_line" semantic conventions. It represents the full ++// command run by the container as a single string representing the full ++// command. [2] ++func ContainerCommandLine(val string) attribute.KeyValue { ++ return ContainerCommandLineKey.String(val) ++} ++ ++// ContainerCommandArgs returns an attribute KeyValue conforming to the ++// "container.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) run by the ++// container. [2] ++func ContainerCommandArgs(val ...string) attribute.KeyValue { ++ return ContainerCommandArgsKey.StringSlice(val) ++} ++ ++// The software deployment. ++const ( ++ // DeploymentEnvironmentKey is the attribute Key conforming to the ++ // "deployment.environment" semantic conventions. It represents the name of ++ // the [deployment ++ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++ // deployment tier). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'staging', 'production' ++ DeploymentEnvironmentKey = attribute.Key("deployment.environment") ++) ++ ++// DeploymentEnvironment returns an attribute KeyValue conforming to the ++// "deployment.environment" semantic conventions. It represents the name of the ++// [deployment ++// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka ++// deployment tier). ++func DeploymentEnvironment(val string) attribute.KeyValue { ++ return DeploymentEnvironmentKey.String(val) ++} ++ ++// The device on which the process represented by this resource is running. ++const ( ++ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic ++ // conventions. It represents a unique identifier representing the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' ++ // Note: The device identifier MUST only be defined using the values ++ // outlined below. This value is not an advertising identifier and MUST NOT ++ // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal ++ // to the [vendor ++ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). ++ // On Android (Java or Kotlin), this value MUST be equal to the Firebase ++ // Installation ID or a globally unique UUID which is persisted across ++ // sessions in your application. More information can be found ++ // [here](https://developer.android.com/training/articles/user-data-ids) on ++ // best practices and exact implementation details. Caution should be taken ++ // when storing personal data or anything which can identify a user. GDPR ++ // and data protection laws may apply, ensure you do your own due ++ // diligence. ++ DeviceIDKey = attribute.Key("device.id") ++ ++ // DeviceModelIdentifierKey is the attribute Key conforming to the ++ // "device.model.identifier" semantic conventions. It represents the model ++ // identifier for the device ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone3,4', 'SM-G920F' ++ // Note: It's recommended this value represents a machine readable version ++ // of the model identifier rather than the market or consumer-friendly name ++ // of the device. ++ DeviceModelIdentifierKey = attribute.Key("device.model.identifier") ++ ++ // DeviceModelNameKey is the attribute Key conforming to the ++ // "device.model.name" semantic conventions. It represents the marketing ++ // name for the device model ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' ++ // Note: It's recommended this value represents a human readable version of ++ // the device model rather than a machine readable alternative. ++ DeviceModelNameKey = attribute.Key("device.model.name") ++ ++ // DeviceManufacturerKey is the attribute Key conforming to the ++ // "device.manufacturer" semantic conventions. It represents the name of ++ // the device manufacturer ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Apple', 'Samsung' ++ // Note: The Android OS provides this field via ++ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). ++ // iOS apps SHOULD hardcode the value `Apple`. ++ DeviceManufacturerKey = attribute.Key("device.manufacturer") ++) ++ ++// DeviceID returns an attribute KeyValue conforming to the "device.id" ++// semantic conventions. It represents a unique identifier representing the ++// device ++func DeviceID(val string) attribute.KeyValue { ++ return DeviceIDKey.String(val) ++} ++ ++// DeviceModelIdentifier returns an attribute KeyValue conforming to the ++// "device.model.identifier" semantic conventions. It represents the model ++// identifier for the device ++func DeviceModelIdentifier(val string) attribute.KeyValue { ++ return DeviceModelIdentifierKey.String(val) ++} ++ ++// DeviceModelName returns an attribute KeyValue conforming to the ++// "device.model.name" semantic conventions. It represents the marketing name ++// for the device model ++func DeviceModelName(val string) attribute.KeyValue { ++ return DeviceModelNameKey.String(val) ++} ++ ++// DeviceManufacturer returns an attribute KeyValue conforming to the ++// "device.manufacturer" semantic conventions. It represents the name of the ++// device manufacturer ++func DeviceManufacturer(val string) attribute.KeyValue { ++ return DeviceManufacturerKey.String(val) ++} ++ ++// A serverless instance. 
++const ( ++ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic ++ // conventions. It represents the name of the single function that this ++ // runtime instance executes. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function', 'myazurefunctionapp/some-function-name' ++ // Note: This is the name of the function as configured/deployed on the ++ // FaaS ++ // platform and is usually different from the name of the callback ++ // function (which may be stored in the ++ // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) ++ // span attributes). ++ // ++ // For some cloud providers, the above definition is ambiguous. The ++ // following ++ // definition of function name MUST be used for this attribute ++ // (and consequently the span name) for the listed cloud ++ // providers/products: ++ // ++ // * **Azure:** The full name `/`, i.e., function app name ++ // followed by a forward slash followed by the function name (this form ++ // can also be seen in the resource JSON for the function). ++ // This means that a span attribute MUST be used, as an Azure function ++ // app can host multiple functions that would usually share ++ // a TracerProvider (see also the `cloud.resource_id` attribute). ++ FaaSNameKey = attribute.Key("faas.name") ++ ++ // FaaSVersionKey is the attribute Key conforming to the "faas.version" ++ // semantic conventions. It represents the immutable version of the ++ // function being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '26', 'pinkfroid-00002' ++ // Note: Depending on the cloud provider and platform, use: ++ // ++ // * **AWS Lambda:** The [function ++ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) ++ // (an integer represented as a decimal string). ++ // * **Google Cloud Run (Services):** The ++ // [revision](https://cloud.google.com/run/docs/managing/revisions) ++ // (i.e., the function name plus the revision suffix). ++ // * **Google Cloud Functions:** The value of the ++ // [`K_REVISION` environment ++ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). ++ // * **Azure Functions:** Not applicable. Do not set this attribute. ++ FaaSVersionKey = attribute.Key("faas.version") ++ ++ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" ++ // semantic conventions. It represents the execution environment ID as a ++ // string, that will be potentially reused for other invocations to the ++ // same function/function version. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' ++ // Note: * **AWS Lambda:** Use the (full) log stream name. ++ FaaSInstanceKey = attribute.Key("faas.instance") ++ ++ // FaaSMaxMemoryKey is the attribute Key conforming to the ++ // "faas.max_memory" semantic conventions. It represents the amount of ++ // memory available to the serverless function converted to Bytes. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 134217728 ++ // Note: It's recommended to set this attribute since e.g. too little ++ // memory can easily stop a Java AWS Lambda function from working ++ // correctly. 
On AWS Lambda, the environment variable ++ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must ++ // be multiplied by 1,048,576). ++ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ++) ++ ++// FaaSName returns an attribute KeyValue conforming to the "faas.name" ++// semantic conventions. It represents the name of the single function that ++// this runtime instance executes. ++func FaaSName(val string) attribute.KeyValue { ++ return FaaSNameKey.String(val) ++} ++ ++// FaaSVersion returns an attribute KeyValue conforming to the ++// "faas.version" semantic conventions. It represents the immutable version of ++// the function being executed. ++func FaaSVersion(val string) attribute.KeyValue { ++ return FaaSVersionKey.String(val) ++} ++ ++// FaaSInstance returns an attribute KeyValue conforming to the ++// "faas.instance" semantic conventions. It represents the execution ++// environment ID as a string, that will be potentially reused for other ++// invocations to the same function/function version. ++func FaaSInstance(val string) attribute.KeyValue { ++ return FaaSInstanceKey.String(val) ++} ++ ++// FaaSMaxMemory returns an attribute KeyValue conforming to the ++// "faas.max_memory" semantic conventions. It represents the amount of memory ++// available to the serverless function converted to Bytes. ++func FaaSMaxMemory(val int) attribute.KeyValue { ++ return FaaSMaxMemoryKey.Int(val) ++} ++ ++// A host is defined as a computing instance. For example, physical servers, ++// virtual machines, switches or disk array. ++const ( ++ // HostIDKey is the attribute Key conforming to the "host.id" semantic ++ // conventions. It represents the unique host ID. For Cloud, this must be ++ // the instance_id assigned by the cloud provider. For non-containerized ++ // systems, this should be the `machine-id`. See the table below for the ++ // sources to use to determine the `machine-id` based on operating system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'fdbf79e8af94cb7f9e8df36789187052' ++ HostIDKey = attribute.Key("host.id") ++ ++ // HostNameKey is the attribute Key conforming to the "host.name" semantic ++ // conventions. It represents the name of the host. On Unix systems, it may ++ // contain what the hostname command returns, or the fully qualified ++ // hostname, or another name specified by the user. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-test' ++ HostNameKey = attribute.Key("host.name") ++ ++ // HostTypeKey is the attribute Key conforming to the "host.type" semantic ++ // conventions. It represents the type of host. For Cloud, this must be the ++ // machine type. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'n1-standard-1' ++ HostTypeKey = attribute.Key("host.type") ++ ++ // HostArchKey is the attribute Key conforming to the "host.arch" semantic ++ // conventions. It represents the CPU architecture the host system is ++ // running on. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ HostArchKey = attribute.Key("host.arch") ++ ++ // HostImageNameKey is the attribute Key conforming to the ++ // "host.image.name" semantic conventions. It represents the name of the VM ++ // image or OS install the host was instantiated from. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' ++ HostImageNameKey = attribute.Key("host.image.name") ++ ++ // HostImageIDKey is the attribute Key conforming to the "host.image.id" ++ // semantic conventions. It represents the vM image ID or host OS image ID. ++ // For Cloud, this value is from the provider. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ami-07b06b442921831e5' ++ HostImageIDKey = attribute.Key("host.image.id") ++ ++ // HostImageVersionKey is the attribute Key conforming to the ++ // "host.image.version" semantic conventions. It represents the version ++ // string of the VM image or host OS as defined in [Version ++ // Attributes](README.md#version-attributes). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0.1' ++ HostImageVersionKey = attribute.Key("host.image.version") ++) ++ ++var ( ++ // AMD64 ++ HostArchAMD64 = HostArchKey.String("amd64") ++ // ARM32 ++ HostArchARM32 = HostArchKey.String("arm32") ++ // ARM64 ++ HostArchARM64 = HostArchKey.String("arm64") ++ // Itanium ++ HostArchIA64 = HostArchKey.String("ia64") ++ // 32-bit PowerPC ++ HostArchPPC32 = HostArchKey.String("ppc32") ++ // 64-bit PowerPC ++ HostArchPPC64 = HostArchKey.String("ppc64") ++ // IBM z/Architecture ++ HostArchS390x = HostArchKey.String("s390x") ++ // 32-bit x86 ++ HostArchX86 = HostArchKey.String("x86") ++) ++ ++// HostID returns an attribute KeyValue conforming to the "host.id" semantic ++// conventions. It represents the unique host ID. For Cloud, this must be the ++// instance_id assigned by the cloud provider. For non-containerized systems, ++// this should be the `machine-id`. See the table below for the sources to use ++// to determine the `machine-id` based on operating system. ++func HostID(val string) attribute.KeyValue { ++ return HostIDKey.String(val) ++} ++ ++// HostName returns an attribute KeyValue conforming to the "host.name" ++// semantic conventions. It represents the name of the host. On Unix systems, ++// it may contain what the hostname command returns, or the fully qualified ++// hostname, or another name specified by the user. ++func HostName(val string) attribute.KeyValue { ++ return HostNameKey.String(val) ++} ++ ++// HostType returns an attribute KeyValue conforming to the "host.type" ++// semantic conventions. It represents the type of host. For Cloud, this must ++// be the machine type. ++func HostType(val string) attribute.KeyValue { ++ return HostTypeKey.String(val) ++} ++ ++// HostImageName returns an attribute KeyValue conforming to the ++// "host.image.name" semantic conventions. It represents the name of the VM ++// image or OS install the host was instantiated from. ++func HostImageName(val string) attribute.KeyValue { ++ return HostImageNameKey.String(val) ++} ++ ++// HostImageID returns an attribute KeyValue conforming to the ++// "host.image.id" semantic conventions. It represents the vM image ID or host ++// OS image ID. For Cloud, this value is from the provider. ++func HostImageID(val string) attribute.KeyValue { ++ return HostImageIDKey.String(val) ++} ++ ++// HostImageVersion returns an attribute KeyValue conforming to the ++// "host.image.version" semantic conventions. It represents the version string ++// of the VM image or host OS as defined in [Version ++// Attributes](README.md#version-attributes). 
++func HostImageVersion(val string) attribute.KeyValue { ++ return HostImageVersionKey.String(val) ++} ++ ++// A Kubernetes Cluster. ++const ( ++ // K8SClusterNameKey is the attribute Key conforming to the ++ // "k8s.cluster.name" semantic conventions. It represents the name of the ++ // cluster. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-cluster' ++ K8SClusterNameKey = attribute.Key("k8s.cluster.name") ++ ++ // K8SClusterUIDKey is the attribute Key conforming to the ++ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for ++ // the cluster, set to the UID of the `kube-system` namespace. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' ++ // Note: K8S does not have support for obtaining a cluster ID. If this is ++ // ever ++ // added, we will recommend collecting the `k8s.cluster.uid` through the ++ // official APIs. In the meantime, we are able to use the `uid` of the ++ // `kube-system` namespace as a proxy for cluster ID. Read on for the ++ // rationale. ++ // ++ // Every object created in a K8S cluster is assigned a distinct UID. The ++ // `kube-system` namespace is used by Kubernetes itself and will exist ++ // for the lifetime of the cluster. Using the `uid` of the `kube-system` ++ // namespace is a reasonable proxy for the K8S ClusterID as it will only ++ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are ++ // UUIDs as standardized by ++ // [ISO/IEC 9834-8 and ITU-T ++ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). ++ // Which states: ++ // ++ // > If generated according to one of the mechanisms defined in Rec. ++ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be ++ // different from all other UUIDs generated before 3603 A.D., or is ++ // extremely likely to be different (depending on the mechanism chosen). ++ // ++ // Therefore, UIDs between clusters should be extremely unlikely to ++ // conflict. ++ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") ++) ++ ++// K8SClusterName returns an attribute KeyValue conforming to the ++// "k8s.cluster.name" semantic conventions. It represents the name of the ++// cluster. ++func K8SClusterName(val string) attribute.KeyValue { ++ return K8SClusterNameKey.String(val) ++} ++ ++// K8SClusterUID returns an attribute KeyValue conforming to the ++// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the ++// cluster, set to the UID of the `kube-system` namespace. ++func K8SClusterUID(val string) attribute.KeyValue { ++ return K8SClusterUIDKey.String(val) ++} ++ ++// A Kubernetes Node object. ++const ( ++ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" ++ // semantic conventions. It represents the name of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'node-1' ++ K8SNodeNameKey = attribute.Key("k8s.node.name") ++ ++ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" ++ // semantic conventions. It represents the UID of the Node. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' ++ K8SNodeUIDKey = attribute.Key("k8s.node.uid") ++) ++ ++// K8SNodeName returns an attribute KeyValue conforming to the ++// "k8s.node.name" semantic conventions. It represents the name of the Node. 
++func K8SNodeName(val string) attribute.KeyValue { ++ return K8SNodeNameKey.String(val) ++} ++ ++// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" ++// semantic conventions. It represents the UID of the Node. ++func K8SNodeUID(val string) attribute.KeyValue { ++ return K8SNodeUIDKey.String(val) ++} ++ ++// A Kubernetes Namespace. ++const ( ++ // K8SNamespaceNameKey is the attribute Key conforming to the ++ // "k8s.namespace.name" semantic conventions. It represents the name of the ++ // namespace that the pod is running in. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'default' ++ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ++) ++ ++// K8SNamespaceName returns an attribute KeyValue conforming to the ++// "k8s.namespace.name" semantic conventions. It represents the name of the ++// namespace that the pod is running in. ++func K8SNamespaceName(val string) attribute.KeyValue { ++ return K8SNamespaceNameKey.String(val) ++} ++ ++// A Kubernetes Pod object. ++const ( ++ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" ++ // semantic conventions. It represents the UID of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SPodUIDKey = attribute.Key("k8s.pod.uid") ++ ++ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" ++ // semantic conventions. It represents the name of the Pod. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry-pod-autoconf' ++ K8SPodNameKey = attribute.Key("k8s.pod.name") ++) ++ ++// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" ++// semantic conventions. It represents the UID of the Pod. ++func K8SPodUID(val string) attribute.KeyValue { ++ return K8SPodUIDKey.String(val) ++} ++ ++// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" ++// semantic conventions. It represents the name of the Pod. ++func K8SPodName(val string) attribute.KeyValue { ++ return K8SPodNameKey.String(val) ++} ++ ++// A container in a ++// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). ++const ( ++ // K8SContainerNameKey is the attribute Key conforming to the ++ // "k8s.container.name" semantic conventions. It represents the name of the ++ // Container from Pod specification, must be unique within a Pod. Container ++ // runtime usually uses different globally unique name (`container.name`). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'redis' ++ K8SContainerNameKey = attribute.Key("k8s.container.name") ++ ++ // K8SContainerRestartCountKey is the attribute Key conforming to the ++ // "k8s.container.restart_count" semantic conventions. It represents the ++ // number of times the container was restarted. This attribute can be used ++ // to identify a particular container (running or stopped) within a ++ // container spec. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ++) ++ ++// K8SContainerName returns an attribute KeyValue conforming to the ++// "k8s.container.name" semantic conventions. It represents the name of the ++// Container from Pod specification, must be unique within a Pod. 
Container ++// runtime usually uses different globally unique name (`container.name`). ++func K8SContainerName(val string) attribute.KeyValue { ++ return K8SContainerNameKey.String(val) ++} ++ ++// K8SContainerRestartCount returns an attribute KeyValue conforming to the ++// "k8s.container.restart_count" semantic conventions. It represents the number ++// of times the container was restarted. This attribute can be used to identify ++// a particular container (running or stopped) within a container spec. ++func K8SContainerRestartCount(val int) attribute.KeyValue { ++ return K8SContainerRestartCountKey.Int(val) ++} ++ ++// A Kubernetes ReplicaSet object. ++const ( ++ // K8SReplicaSetUIDKey is the attribute Key conforming to the ++ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++ // ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") ++ ++ // K8SReplicaSetNameKey is the attribute Key conforming to the ++ // "k8s.replicaset.name" semantic conventions. It represents the name of ++ // the ReplicaSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ++) ++ ++// K8SReplicaSetUID returns an attribute KeyValue conforming to the ++// "k8s.replicaset.uid" semantic conventions. It represents the UID of the ++// ReplicaSet. ++func K8SReplicaSetUID(val string) attribute.KeyValue { ++ return K8SReplicaSetUIDKey.String(val) ++} ++ ++// K8SReplicaSetName returns an attribute KeyValue conforming to the ++// "k8s.replicaset.name" semantic conventions. It represents the name of the ++// ReplicaSet. ++func K8SReplicaSetName(val string) attribute.KeyValue { ++ return K8SReplicaSetNameKey.String(val) ++} ++ ++// A Kubernetes Deployment object. ++const ( ++ // K8SDeploymentUIDKey is the attribute Key conforming to the ++ // "k8s.deployment.uid" semantic conventions. It represents the UID of the ++ // Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") ++ ++ // K8SDeploymentNameKey is the attribute Key conforming to the ++ // "k8s.deployment.name" semantic conventions. It represents the name of ++ // the Deployment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ++) ++ ++// K8SDeploymentUID returns an attribute KeyValue conforming to the ++// "k8s.deployment.uid" semantic conventions. It represents the UID of the ++// Deployment. ++func K8SDeploymentUID(val string) attribute.KeyValue { ++ return K8SDeploymentUIDKey.String(val) ++} ++ ++// K8SDeploymentName returns an attribute KeyValue conforming to the ++// "k8s.deployment.name" semantic conventions. It represents the name of the ++// Deployment. ++func K8SDeploymentName(val string) attribute.KeyValue { ++ return K8SDeploymentNameKey.String(val) ++} ++ ++// A Kubernetes StatefulSet object. ++const ( ++ // K8SStatefulSetUIDKey is the attribute Key conforming to the ++ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++ // StatefulSet. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") ++ ++ // K8SStatefulSetNameKey is the attribute Key conforming to the ++ // "k8s.statefulset.name" semantic conventions. It represents the name of ++ // the StatefulSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ++) ++ ++// K8SStatefulSetUID returns an attribute KeyValue conforming to the ++// "k8s.statefulset.uid" semantic conventions. It represents the UID of the ++// StatefulSet. ++func K8SStatefulSetUID(val string) attribute.KeyValue { ++ return K8SStatefulSetUIDKey.String(val) ++} ++ ++// K8SStatefulSetName returns an attribute KeyValue conforming to the ++// "k8s.statefulset.name" semantic conventions. It represents the name of the ++// StatefulSet. ++func K8SStatefulSetName(val string) attribute.KeyValue { ++ return K8SStatefulSetNameKey.String(val) ++} ++ ++// A Kubernetes DaemonSet object. ++const ( ++ // K8SDaemonSetUIDKey is the attribute Key conforming to the ++ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") ++ ++ // K8SDaemonSetNameKey is the attribute Key conforming to the ++ // "k8s.daemonset.name" semantic conventions. It represents the name of the ++ // DaemonSet. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ++) ++ ++// K8SDaemonSetUID returns an attribute KeyValue conforming to the ++// "k8s.daemonset.uid" semantic conventions. It represents the UID of the ++// DaemonSet. ++func K8SDaemonSetUID(val string) attribute.KeyValue { ++ return K8SDaemonSetUIDKey.String(val) ++} ++ ++// K8SDaemonSetName returns an attribute KeyValue conforming to the ++// "k8s.daemonset.name" semantic conventions. It represents the name of the ++// DaemonSet. ++func K8SDaemonSetName(val string) attribute.KeyValue { ++ return K8SDaemonSetNameKey.String(val) ++} ++ ++// A Kubernetes Job object. ++const ( ++ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" ++ // semantic conventions. It represents the UID of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SJobUIDKey = attribute.Key("k8s.job.uid") ++ ++ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" ++ // semantic conventions. It represents the name of the Job. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SJobNameKey = attribute.Key("k8s.job.name") ++) ++ ++// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" ++// semantic conventions. It represents the UID of the Job. ++func K8SJobUID(val string) attribute.KeyValue { ++ return K8SJobUIDKey.String(val) ++} ++ ++// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" ++// semantic conventions. It represents the name of the Job. 
++func K8SJobName(val string) attribute.KeyValue { ++ return K8SJobNameKey.String(val) ++} ++ ++// A Kubernetes CronJob object. ++const ( ++ // K8SCronJobUIDKey is the attribute Key conforming to the ++ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' ++ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") ++ ++ // K8SCronJobNameKey is the attribute Key conforming to the ++ // "k8s.cronjob.name" semantic conventions. It represents the name of the ++ // CronJob. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ++) ++ ++// K8SCronJobUID returns an attribute KeyValue conforming to the ++// "k8s.cronjob.uid" semantic conventions. It represents the UID of the ++// CronJob. ++func K8SCronJobUID(val string) attribute.KeyValue { ++ return K8SCronJobUIDKey.String(val) ++} ++ ++// K8SCronJobName returns an attribute KeyValue conforming to the ++// "k8s.cronjob.name" semantic conventions. It represents the name of the ++// CronJob. ++func K8SCronJobName(val string) attribute.KeyValue { ++ return K8SCronJobNameKey.String(val) ++} ++ ++// The operating system (OS) on which the process represented by this resource ++// is running. ++const ( ++ // OSTypeKey is the attribute Key conforming to the "os.type" semantic ++ // conventions. It represents the operating system type. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ OSTypeKey = attribute.Key("os.type") ++ ++ // OSDescriptionKey is the attribute Key conforming to the "os.description" ++ // semantic conventions. It represents the human readable (not intended to ++ // be parsed) OS version information, like e.g. reported by `ver` or ++ // `lsb_release -a` commands. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 ++ // LTS' ++ OSDescriptionKey = attribute.Key("os.description") ++ ++ // OSNameKey is the attribute Key conforming to the "os.name" semantic ++ // conventions. It represents the human readable operating system name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'iOS', 'Android', 'Ubuntu' ++ OSNameKey = attribute.Key("os.name") ++ ++ // OSVersionKey is the attribute Key conforming to the "os.version" ++ // semantic conventions. It represents the version string of the operating ++ // system as defined in [Version ++ // Attributes](/docs/resource/README.md#version-attributes). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.2.1', '18.04.1' ++ OSVersionKey = attribute.Key("os.version") ++) ++ ++var ( ++ // Microsoft Windows ++ OSTypeWindows = OSTypeKey.String("windows") ++ // Linux ++ OSTypeLinux = OSTypeKey.String("linux") ++ // Apple Darwin ++ OSTypeDarwin = OSTypeKey.String("darwin") ++ // FreeBSD ++ OSTypeFreeBSD = OSTypeKey.String("freebsd") ++ // NetBSD ++ OSTypeNetBSD = OSTypeKey.String("netbsd") ++ // OpenBSD ++ OSTypeOpenBSD = OSTypeKey.String("openbsd") ++ // DragonFly BSD ++ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") ++ // HP-UX (Hewlett Packard Unix) ++ OSTypeHPUX = OSTypeKey.String("hpux") ++ // AIX (Advanced Interactive eXecutive) ++ OSTypeAIX = OSTypeKey.String("aix") ++ // SunOS, Oracle Solaris ++ OSTypeSolaris = OSTypeKey.String("solaris") ++ // IBM z/OS ++ OSTypeZOS = OSTypeKey.String("z_os") ++) ++ ++// OSDescription returns an attribute KeyValue conforming to the ++// "os.description" semantic conventions. It represents the human readable (not ++// intended to be parsed) OS version information, like e.g. reported by `ver` ++// or `lsb_release -a` commands. ++func OSDescription(val string) attribute.KeyValue { ++ return OSDescriptionKey.String(val) ++} ++ ++// OSName returns an attribute KeyValue conforming to the "os.name" semantic ++// conventions. It represents the human readable operating system name. ++func OSName(val string) attribute.KeyValue { ++ return OSNameKey.String(val) ++} ++ ++// OSVersion returns an attribute KeyValue conforming to the "os.version" ++// semantic conventions. It represents the version string of the operating ++// system as defined in [Version ++// Attributes](/docs/resource/README.md#version-attributes). ++func OSVersion(val string) attribute.KeyValue { ++ return OSVersionKey.String(val) ++} ++ ++// An operating system process. ++const ( ++ // ProcessPIDKey is the attribute Key conforming to the "process.pid" ++ // semantic conventions. It represents the process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1234 ++ ProcessPIDKey = attribute.Key("process.pid") ++ ++ // ProcessParentPIDKey is the attribute Key conforming to the ++ // "process.parent_pid" semantic conventions. It represents the parent ++ // Process identifier (PID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 111 ++ ProcessParentPIDKey = attribute.Key("process.parent_pid") ++ ++ // ProcessExecutableNameKey is the attribute Key conforming to the ++ // "process.executable.name" semantic conventions. It represents the name ++ // of the process executable. On Linux based systems, can be set to the ++ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name ++ // of `GetProcessImageFileNameW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'otelcol' ++ ProcessExecutableNameKey = attribute.Key("process.executable.name") ++ ++ // ProcessExecutablePathKey is the attribute Key conforming to the ++ // "process.executable.path" semantic conventions. It represents the full ++ // path to the process executable. On Linux based systems, can be set to ++ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of ++ // `GetProcessImageFileNameW`. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: '/usr/bin/cmd/otelcol' ++ ProcessExecutablePathKey = attribute.Key("process.executable.path") ++ ++ // ProcessCommandKey is the attribute Key conforming to the ++ // "process.command" semantic conventions. It represents the command used ++ // to launch the process (i.e. the command name). On Linux based systems, ++ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can ++ // be set to the first parameter extracted from `GetCommandLineW`. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otelcol' ++ ProcessCommandKey = attribute.Key("process.command") ++ ++ // ProcessCommandLineKey is the attribute Key conforming to the ++ // "process.command_line" semantic conventions. It represents the full ++ // command used to launch the process as a single string representing the ++ // full command. On Windows, can be set to the result of `GetCommandLineW`. ++ // Do not set this if you have to assemble it just for monitoring; use ++ // `process.command_args` instead. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ++ ProcessCommandLineKey = attribute.Key("process.command_line") ++ ++ // ProcessCommandArgsKey is the attribute Key conforming to the ++ // "process.command_args" semantic conventions. It represents the all the ++ // command arguments (including the command/executable itself) as received ++ // by the process. On Linux-based systems (and some other Unixoid systems ++ // supporting procfs), can be set according to the list of null-delimited ++ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++ // this would be the full argv vector passed to `main`. ++ // ++ // Type: string[] ++ // RequirementLevel: ConditionallyRequired (See alternative attributes ++ // below.) ++ // Stability: stable ++ // Examples: 'cmd/otecol', '--config=config.yaml' ++ ProcessCommandArgsKey = attribute.Key("process.command_args") ++ ++ // ProcessOwnerKey is the attribute Key conforming to the "process.owner" ++ // semantic conventions. It represents the username of the user that owns ++ // the process. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'root' ++ ProcessOwnerKey = attribute.Key("process.owner") ++) ++ ++// ProcessPID returns an attribute KeyValue conforming to the "process.pid" ++// semantic conventions. It represents the process identifier (PID). ++func ProcessPID(val int) attribute.KeyValue { ++ return ProcessPIDKey.Int(val) ++} ++ ++// ProcessParentPID returns an attribute KeyValue conforming to the ++// "process.parent_pid" semantic conventions. It represents the parent Process ++// identifier (PID). ++func ProcessParentPID(val int) attribute.KeyValue { ++ return ProcessParentPIDKey.Int(val) ++} ++ ++// ProcessExecutableName returns an attribute KeyValue conforming to the ++// "process.executable.name" semantic conventions. It represents the name of ++// the process executable. On Linux based systems, can be set to the `Name` in ++// `proc/[pid]/status`. On Windows, can be set to the base name of ++// `GetProcessImageFileNameW`. 
++func ProcessExecutableName(val string) attribute.KeyValue { ++ return ProcessExecutableNameKey.String(val) ++} ++ ++// ProcessExecutablePath returns an attribute KeyValue conforming to the ++// "process.executable.path" semantic conventions. It represents the full path ++// to the process executable. On Linux based systems, can be set to the target ++// of `proc/[pid]/exe`. On Windows, can be set to the result of ++// `GetProcessImageFileNameW`. ++func ProcessExecutablePath(val string) attribute.KeyValue { ++ return ProcessExecutablePathKey.String(val) ++} ++ ++// ProcessCommand returns an attribute KeyValue conforming to the ++// "process.command" semantic conventions. It represents the command used to ++// launch the process (i.e. the command name). On Linux based systems, can be ++// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to ++// the first parameter extracted from `GetCommandLineW`. ++func ProcessCommand(val string) attribute.KeyValue { ++ return ProcessCommandKey.String(val) ++} ++ ++// ProcessCommandLine returns an attribute KeyValue conforming to the ++// "process.command_line" semantic conventions. It represents the full command ++// used to launch the process as a single string representing the full command. ++// On Windows, can be set to the result of `GetCommandLineW`. Do not set this ++// if you have to assemble it just for monitoring; use `process.command_args` ++// instead. ++func ProcessCommandLine(val string) attribute.KeyValue { ++ return ProcessCommandLineKey.String(val) ++} ++ ++// ProcessCommandArgs returns an attribute KeyValue conforming to the ++// "process.command_args" semantic conventions. It represents the all the ++// command arguments (including the command/executable itself) as received by ++// the process. On Linux-based systems (and some other Unixoid systems ++// supporting procfs), can be set according to the list of null-delimited ++// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, ++// this would be the full argv vector passed to `main`. ++func ProcessCommandArgs(val ...string) attribute.KeyValue { ++ return ProcessCommandArgsKey.StringSlice(val) ++} ++ ++// ProcessOwner returns an attribute KeyValue conforming to the ++// "process.owner" semantic conventions. It represents the username of the user ++// that owns the process. ++func ProcessOwner(val string) attribute.KeyValue { ++ return ProcessOwnerKey.String(val) ++} ++ ++// The single (language) runtime instance which is monitored. ++const ( ++ // ProcessRuntimeNameKey is the attribute Key conforming to the ++ // "process.runtime.name" semantic conventions. It represents the name of ++ // the runtime of this process. For compiled native binaries, this SHOULD ++ // be the name of the compiler. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'OpenJDK Runtime Environment' ++ ProcessRuntimeNameKey = attribute.Key("process.runtime.name") ++ ++ // ProcessRuntimeVersionKey is the attribute Key conforming to the ++ // "process.runtime.version" semantic conventions. It represents the ++ // version of the runtime of this process, as returned by the runtime ++ // without modification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '14.0.2' ++ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") ++ ++ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the ++ // "process.runtime.description" semantic conventions. 
It represents an ++ // additional description about the runtime of the process, for example a ++ // specific vendor customization of the runtime environment. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ++ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ++) ++ ++// ProcessRuntimeName returns an attribute KeyValue conforming to the ++// "process.runtime.name" semantic conventions. It represents the name of the ++// runtime of this process. For compiled native binaries, this SHOULD be the ++// name of the compiler. ++func ProcessRuntimeName(val string) attribute.KeyValue { ++ return ProcessRuntimeNameKey.String(val) ++} ++ ++// ProcessRuntimeVersion returns an attribute KeyValue conforming to the ++// "process.runtime.version" semantic conventions. It represents the version of ++// the runtime of this process, as returned by the runtime without ++// modification. ++func ProcessRuntimeVersion(val string) attribute.KeyValue { ++ return ProcessRuntimeVersionKey.String(val) ++} ++ ++// ProcessRuntimeDescription returns an attribute KeyValue conforming to the ++// "process.runtime.description" semantic conventions. It represents an ++// additional description about the runtime of the process, for example a ++// specific vendor customization of the runtime environment. ++func ProcessRuntimeDescription(val string) attribute.KeyValue { ++ return ProcessRuntimeDescriptionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNameKey is the attribute Key conforming to the "service.name" ++ // semantic conventions. It represents the logical name of the service. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'shoppingcart' ++ // Note: MUST be the same for all instances of horizontally scaled ++ // services. If the value was not specified, SDKs MUST fallback to ++ // `unknown_service:` concatenated with ++ // [`process.executable.name`](process.md#process), e.g. ++ // `unknown_service:bash`. If `process.executable.name` is not available, ++ // the value MUST be set to `unknown_service`. ++ ServiceNameKey = attribute.Key("service.name") ++ ++ // ServiceVersionKey is the attribute Key conforming to the ++ // "service.version" semantic conventions. It represents the version string ++ // of the service API or implementation. The format is not defined by these ++ // conventions. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2.0.0', 'a01dbef8a' ++ ServiceVersionKey = attribute.Key("service.version") ++) ++ ++// ServiceName returns an attribute KeyValue conforming to the ++// "service.name" semantic conventions. It represents the logical name of the ++// service. ++func ServiceName(val string) attribute.KeyValue { ++ return ServiceNameKey.String(val) ++} ++ ++// ServiceVersion returns an attribute KeyValue conforming to the ++// "service.version" semantic conventions. It represents the version string of ++// the service API or implementation. The format is not defined by these ++// conventions. ++func ServiceVersion(val string) attribute.KeyValue { ++ return ServiceVersionKey.String(val) ++} ++ ++// A service instance. ++const ( ++ // ServiceNamespaceKey is the attribute Key conforming to the ++ // "service.namespace" semantic conventions. It represents a namespace for ++ // `service.name`. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Shop' ++ // Note: A string value having a meaning that helps to distinguish a group ++ // of services, for example the team name that owns a group of services. ++ // `service.name` is expected to be unique within the same namespace. If ++ // `service.namespace` is not specified in the Resource then `service.name` ++ // is expected to be unique for all services that have no explicit ++ // namespace defined (so the empty/unspecified namespace is simply one more ++ // valid namespace). Zero-length namespace string is assumed equal to ++ // unspecified namespace. ++ ServiceNamespaceKey = attribute.Key("service.namespace") ++ ++ // ServiceInstanceIDKey is the attribute Key conforming to the ++ // "service.instance.id" semantic conventions. It represents the string ID ++ // of the service instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'my-k8s-pod-deployment-1', ++ // '627cc493-f310-47de-96bd-71410b7dec09' ++ // Note: MUST be unique for each instance of the same ++ // `service.namespace,service.name` pair (in other words ++ // `service.namespace,service.name,service.instance.id` triplet MUST be ++ // globally unique). The ID helps to distinguish instances of the same ++ // service that exist at the same time (e.g. instances of a horizontally ++ // scaled service). It is preferable for the ID to be persistent and stay ++ // the same for the lifetime of the service instance, however it is ++ // acceptable that the ID is ephemeral and changes during important ++ // lifetime events for the service (e.g. service restarts). If the service ++ // has no inherent unique ID that can be used as the value of this ++ // attribute it is recommended to generate a random Version 1 or Version 4 ++ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use ++ // Version 5, see RFC 4122 for more recommendations). ++ ServiceInstanceIDKey = attribute.Key("service.instance.id") ++) ++ ++// ServiceNamespace returns an attribute KeyValue conforming to the ++// "service.namespace" semantic conventions. It represents a namespace for ++// `service.name`. ++func ServiceNamespace(val string) attribute.KeyValue { ++ return ServiceNamespaceKey.String(val) ++} ++ ++// ServiceInstanceID returns an attribute KeyValue conforming to the ++// "service.instance.id" semantic conventions. It represents the string ID of ++// the service instance. ++func ServiceInstanceID(val string) attribute.KeyValue { ++ return ServiceInstanceIDKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetrySDKNameKey is the attribute Key conforming to the ++ // "telemetry.sdk.name" semantic conventions. It represents the name of the ++ // telemetry SDK as defined above. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'opentelemetry' ++ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute ++ // to `opentelemetry`. ++ // If another SDK, like a fork or a vendor-provided implementation, is ++ // used, this SDK MUST set the ++ // `telemetry.sdk.name` attribute to the fully-qualified class or module ++ // name of this SDK's main entry point ++ // or another suitable identifier depending on the language. ++ // The identifier `opentelemetry` is reserved and MUST NOT be used in this ++ // case. 
++ // All custom identifiers SHOULD be stable across different versions of an ++ // implementation. ++ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") ++ ++ // TelemetrySDKLanguageKey is the attribute Key conforming to the ++ // "telemetry.sdk.language" semantic conventions. It represents the ++ // language of the telemetry SDK. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") ++ ++ // TelemetrySDKVersionKey is the attribute Key conforming to the ++ // "telemetry.sdk.version" semantic conventions. It represents the version ++ // string of the telemetry SDK. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ++) ++ ++var ( ++ // cpp ++ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") ++ // dotnet ++ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") ++ // erlang ++ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") ++ // go ++ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") ++ // java ++ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") ++ // nodejs ++ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") ++ // php ++ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") ++ // python ++ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") ++ // ruby ++ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") ++ // rust ++ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") ++ // swift ++ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ++ // webjs ++ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ++) ++ ++// TelemetrySDKName returns an attribute KeyValue conforming to the ++// "telemetry.sdk.name" semantic conventions. It represents the name of the ++// telemetry SDK as defined above. ++func TelemetrySDKName(val string) attribute.KeyValue { ++ return TelemetrySDKNameKey.String(val) ++} ++ ++// TelemetrySDKVersion returns an attribute KeyValue conforming to the ++// "telemetry.sdk.version" semantic conventions. It represents the version ++// string of the telemetry SDK. ++func TelemetrySDKVersion(val string) attribute.KeyValue { ++ return TelemetrySDKVersionKey.String(val) ++} ++ ++// The telemetry SDK used to capture data recorded by the instrumentation ++// libraries. ++const ( ++ // TelemetryAutoVersionKey is the attribute Key conforming to the ++ // "telemetry.auto.version" semantic conventions. It represents the version ++ // string of the auto instrumentation agent, if used. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.2.3' ++ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ++) ++ ++// TelemetryAutoVersion returns an attribute KeyValue conforming to the ++// "telemetry.auto.version" semantic conventions. It represents the version ++// string of the auto instrumentation agent, if used. ++func TelemetryAutoVersion(val string) attribute.KeyValue { ++ return TelemetryAutoVersionKey.String(val) ++} ++ ++// Resource describing the packaged software running the application code. Web ++// engines are typically executed using process.runtime. ++const ( ++ // WebEngineNameKey is the attribute Key conforming to the "webengine.name" ++ // semantic conventions. It represents the name of the web engine. 
++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'WildFly' ++ WebEngineNameKey = attribute.Key("webengine.name") ++ ++ // WebEngineVersionKey is the attribute Key conforming to the ++ // "webengine.version" semantic conventions. It represents the version of ++ // the web engine. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '21.0.0' ++ WebEngineVersionKey = attribute.Key("webengine.version") ++ ++ // WebEngineDescriptionKey is the attribute Key conforming to the ++ // "webengine.description" semantic conventions. It represents the ++ // additional description of the web engine (e.g. detailed version and ++ // edition information). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - ++ // 2.2.2.Final' ++ WebEngineDescriptionKey = attribute.Key("webengine.description") ++) ++ ++// WebEngineName returns an attribute KeyValue conforming to the ++// "webengine.name" semantic conventions. It represents the name of the web ++// engine. ++func WebEngineName(val string) attribute.KeyValue { ++ return WebEngineNameKey.String(val) ++} ++ ++// WebEngineVersion returns an attribute KeyValue conforming to the ++// "webengine.version" semantic conventions. It represents the version of the ++// web engine. ++func WebEngineVersion(val string) attribute.KeyValue { ++ return WebEngineVersionKey.String(val) ++} ++ ++// WebEngineDescription returns an attribute KeyValue conforming to the ++// "webengine.description" semantic conventions. It represents the additional ++// description of the web engine (e.g. detailed version and edition ++// information). ++func WebEngineDescription(val string) attribute.KeyValue { ++ return WebEngineDescriptionKey.String(val) ++} ++ ++// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's ++// concepts. ++const ( ++ // OTelScopeNameKey is the attribute Key conforming to the ++ // "otel.scope.name" semantic conventions. It represents the name of the ++ // instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OTelScopeNameKey = attribute.Key("otel.scope.name") ++ ++ // OTelScopeVersionKey is the attribute Key conforming to the ++ // "otel.scope.version" semantic conventions. It represents the version of ++ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0.0' ++ OTelScopeVersionKey = attribute.Key("otel.scope.version") ++) ++ ++// OTelScopeName returns an attribute KeyValue conforming to the ++// "otel.scope.name" semantic conventions. It represents the name of the ++// instrumentation scope - (`InstrumentationScope.Name` in OTLP). ++func OTelScopeName(val string) attribute.KeyValue { ++ return OTelScopeNameKey.String(val) ++} ++ ++// OTelScopeVersion returns an attribute KeyValue conforming to the ++// "otel.scope.version" semantic conventions. It represents the version of the ++// instrumentation scope - (`InstrumentationScope.Version` in OTLP). ++func OTelScopeVersion(val string) attribute.KeyValue { ++ return OTelScopeVersionKey.String(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry ++// Scope's concepts. 
++const ( ++ // OTelLibraryNameKey is the attribute Key conforming to the ++ // "otel.library.name" semantic conventions. It represents the deprecated, ++ // use the `otel.scope.name` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: 'io.opentelemetry.contrib.mongodb' ++ OTelLibraryNameKey = attribute.Key("otel.library.name") ++ ++ // OTelLibraryVersionKey is the attribute Key conforming to the ++ // "otel.library.version" semantic conventions. It represents the ++ // deprecated, use the `otel.scope.version` attribute. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: deprecated ++ // Examples: '1.0.0' ++ OTelLibraryVersionKey = attribute.Key("otel.library.version") ++) ++ ++// OTelLibraryName returns an attribute KeyValue conforming to the ++// "otel.library.name" semantic conventions. It represents the deprecated, use ++// the `otel.scope.name` attribute. ++func OTelLibraryName(val string) attribute.KeyValue { ++ return OTelLibraryNameKey.String(val) ++} ++ ++// OTelLibraryVersion returns an attribute KeyValue conforming to the ++// "otel.library.version" semantic conventions. It represents the deprecated, ++// use the `otel.scope.version` attribute. ++func OTelLibraryVersion(val string) attribute.KeyValue { ++ return OTelLibraryVersionKey.String(val) ++} +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go +new file mode 100644 +index 00000000000..66ffd5989f3 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go +@@ -0,0 +1,20 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++// SchemaURL is the schema URL that matches the version of the semantic conventions ++// that this package defines. Semconv packages starting from v1.4.0 must declare ++// non-empty schema URL in the form https://opentelemetry.io/schemas/ ++const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" +diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go +new file mode 100644 +index 00000000000..b5a91450d42 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go +@@ -0,0 +1,2495 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
++ ++// Code generated from semantic convention specification. DO NOT EDIT. ++ ++package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" ++ ++import "go.opentelemetry.io/otel/attribute" ++ ++// The shared attributes used to report a single exception associated with a ++// span or log. ++const ( ++ // ExceptionTypeKey is the attribute Key conforming to the "exception.type" ++ // semantic conventions. It represents the type of the exception (its ++ // fully-qualified class name, if applicable). The dynamic type of the ++ // exception should be preferred over the static type in languages that ++ // support it. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'java.net.ConnectException', 'OSError' ++ ExceptionTypeKey = attribute.Key("exception.type") ++ ++ // ExceptionMessageKey is the attribute Key conforming to the ++ // "exception.message" semantic conventions. It represents the exception ++ // message. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Division by zero', "Can't convert 'int' object to str ++ // implicitly" ++ ExceptionMessageKey = attribute.Key("exception.message") ++ ++ // ExceptionStacktraceKey is the attribute Key conforming to the ++ // "exception.stacktrace" semantic conventions. It represents a stacktrace ++ // as a string in the natural representation for the language runtime. The ++ // representation is to be determined and documented by each language SIG. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test ++ // exception\\n at ' ++ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' ++ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' ++ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ++ ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ++) ++ ++// ExceptionType returns an attribute KeyValue conforming to the ++// "exception.type" semantic conventions. It represents the type of the ++// exception (its fully-qualified class name, if applicable). The dynamic type ++// of the exception should be preferred over the static type in languages that ++// support it. ++func ExceptionType(val string) attribute.KeyValue { ++ return ExceptionTypeKey.String(val) ++} ++ ++// ExceptionMessage returns an attribute KeyValue conforming to the ++// "exception.message" semantic conventions. It represents the exception ++// message. ++func ExceptionMessage(val string) attribute.KeyValue { ++ return ExceptionMessageKey.String(val) ++} ++ ++// ExceptionStacktrace returns an attribute KeyValue conforming to the ++// "exception.stacktrace" semantic conventions. It represents a stacktrace as a ++// string in the natural representation for the language runtime. The ++// representation is to be determined and documented by each language SIG. ++func ExceptionStacktrace(val string) attribute.KeyValue { ++ return ExceptionStacktraceKey.String(val) ++} ++ ++// Span attributes used by AWS Lambda (in addition to general `faas` ++// attributes). ++const ( ++ // AWSLambdaInvokedARNKey is the attribute Key conforming to the ++ // "aws.lambda.invoked_arn" semantic conventions. It represents the full ++ // invoked ARN as provided on the `Context` passed to the function ++ // (`Lambda-Runtime-Invoked-Function-ARN` header on the ++ // `/runtime/invocation/next` applicable). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' ++ // Note: This may be different from `cloud.resource_id` if an alias is ++ // involved. ++ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ++) ++ ++// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the ++// "aws.lambda.invoked_arn" semantic conventions. It represents the full ++// invoked ARN as provided on the `Context` passed to the function ++// (`Lambda-Runtime-Invoked-Function-ARN` header on the ++// `/runtime/invocation/next` applicable). ++func AWSLambdaInvokedARN(val string) attribute.KeyValue { ++ return AWSLambdaInvokedARNKey.String(val) ++} ++ ++// Attributes for CloudEvents. CloudEvents is a specification on how to define ++// event data in a standard way. These attributes can be attached to spans when ++// performing operations with CloudEvents, regardless of the protocol being ++// used. ++const ( ++ // CloudeventsEventIDKey is the attribute Key conforming to the ++ // "cloudevents.event_id" semantic conventions. It represents the ++ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++ // uniquely identifies the event. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' ++ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") ++ ++ // CloudeventsEventSourceKey is the attribute Key conforming to the ++ // "cloudevents.event_source" semantic conventions. It represents the ++ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++ // identifies the context in which an event happened. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'https://github.com/cloudevents', ++ // '/cloudevents/spec/pull/123', 'my-service' ++ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") ++ ++ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the ++ // "cloudevents.event_spec_version" semantic conventions. It represents the ++ // [version of the CloudEvents ++ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++ // which the event uses. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '1.0' ++ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") ++ ++ // CloudeventsEventTypeKey is the attribute Key conforming to the ++ // "cloudevents.event_type" semantic conventions. It represents the ++ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++ // contains a value describing the type of event related to the originating ++ // occurrence. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.github.pull_request.opened', ++ // 'com.example.object.deleted.v2' ++ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") ++ ++ // CloudeventsEventSubjectKey is the attribute Key conforming to the ++ // "cloudevents.event_subject" semantic conventions. It represents the ++ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++ // of the event in the context of the event producer (identified by ++ // source). 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'mynewfile.jpg' ++ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ++) ++ ++// CloudeventsEventID returns an attribute KeyValue conforming to the ++// "cloudevents.event_id" semantic conventions. It represents the ++// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) ++// uniquely identifies the event. ++func CloudeventsEventID(val string) attribute.KeyValue { ++ return CloudeventsEventIDKey.String(val) ++} ++ ++// CloudeventsEventSource returns an attribute KeyValue conforming to the ++// "cloudevents.event_source" semantic conventions. It represents the ++// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) ++// identifies the context in which an event happened. ++func CloudeventsEventSource(val string) attribute.KeyValue { ++ return CloudeventsEventSourceKey.String(val) ++} ++ ++// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to ++// the "cloudevents.event_spec_version" semantic conventions. It represents the ++// [version of the CloudEvents ++// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) ++// which the event uses. ++func CloudeventsEventSpecVersion(val string) attribute.KeyValue { ++ return CloudeventsEventSpecVersionKey.String(val) ++} ++ ++// CloudeventsEventType returns an attribute KeyValue conforming to the ++// "cloudevents.event_type" semantic conventions. It represents the ++// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) ++// contains a value describing the type of event related to the originating ++// occurrence. ++func CloudeventsEventType(val string) attribute.KeyValue { ++ return CloudeventsEventTypeKey.String(val) ++} ++ ++// CloudeventsEventSubject returns an attribute KeyValue conforming to the ++// "cloudevents.event_subject" semantic conventions. It represents the ++// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) ++// of the event in the context of the event producer (identified by source). ++func CloudeventsEventSubject(val string) attribute.KeyValue { ++ return CloudeventsEventSubjectKey.String(val) ++} ++ ++// Semantic conventions for the OpenTracing Shim ++const ( ++ // OpentracingRefTypeKey is the attribute Key conforming to the ++ // "opentracing.ref_type" semantic conventions. It represents the ++ // parent-child Reference type ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: The causal relationship between a child Span and a parent Span. ++ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ++) ++ ++var ( ++ // The parent Span depends on the child Span in some capacity ++ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") ++ // The parent Span does not depend in any way on the result of the child Span ++ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ++) ++ ++// The attributes used to perform database client calls. ++const ( ++ // DBSystemKey is the attribute Key conforming to the "db.system" semantic ++ // conventions. It represents an identifier for the database management ++ // system (DBMS) product being used. See below for a list of well-known ++ // identifiers. 
++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ DBSystemKey = attribute.Key("db.system") ++ ++ // DBConnectionStringKey is the attribute Key conforming to the ++ // "db.connection_string" semantic conventions. It represents the ++ // connection string used to connect to the database. It is recommended to ++ // remove embedded credentials. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' ++ DBConnectionStringKey = attribute.Key("db.connection_string") ++ ++ // DBUserKey is the attribute Key conforming to the "db.user" semantic ++ // conventions. It represents the username for accessing the database. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'readonly_user', 'reporting_user' ++ DBUserKey = attribute.Key("db.user") ++ ++ // DBJDBCDriverClassnameKey is the attribute Key conforming to the ++ // "db.jdbc.driver_classname" semantic conventions. It represents the ++ // fully-qualified class name of the [Java Database Connectivity ++ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) ++ // driver used to connect. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'org.postgresql.Driver', ++ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' ++ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") ++ ++ // DBNameKey is the attribute Key conforming to the "db.name" semantic ++ // conventions. It represents the this attribute is used to report the name ++ // of the database being accessed. For commands that switch the database, ++ // this should be set to the target database (even if the command fails). ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If applicable.) ++ // Stability: stable ++ // Examples: 'customers', 'main' ++ // Note: In some SQL databases, the database name to be used is called ++ // "schema name". In case there are multiple layers that could be ++ // considered for database name (e.g. Oracle instance name and schema ++ // name), the database name to be used is the more specific layer (e.g. ++ // Oracle schema name). ++ DBNameKey = attribute.Key("db.name") ++ ++ // DBStatementKey is the attribute Key conforming to the "db.statement" ++ // semantic conventions. It represents the database statement being ++ // executed. ++ // ++ // Type: string ++ // RequirementLevel: Recommended (Should be collected by default only if ++ // there is sanitization that excludes sensitive information.) ++ // Stability: stable ++ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' ++ DBStatementKey = attribute.Key("db.statement") ++ ++ // DBOperationKey is the attribute Key conforming to the "db.operation" ++ // semantic conventions. It represents the name of the operation being ++ // executed, e.g. the [MongoDB command ++ // name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++ // such as `findAndModify`, or the SQL keyword. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If `db.statement` is not ++ // applicable.) ++ // Stability: stable ++ // Examples: 'findAndModify', 'HMSET', 'SELECT' ++ // Note: When setting this to an SQL keyword, it is not recommended to ++ // attempt any client-side parsing of `db.statement` just to get this ++ // property, but it should be set if the operation name is provided by the ++ // library being instrumented. 
If the SQL statement has an ambiguous ++ // operation, or performs more than one operation, this value may be ++ // omitted. ++ DBOperationKey = attribute.Key("db.operation") ++) ++ ++var ( ++ // Some other SQL database. Fallback only. See notes ++ DBSystemOtherSQL = DBSystemKey.String("other_sql") ++ // Microsoft SQL Server ++ DBSystemMSSQL = DBSystemKey.String("mssql") ++ // Microsoft SQL Server Compact ++ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") ++ // MySQL ++ DBSystemMySQL = DBSystemKey.String("mysql") ++ // Oracle Database ++ DBSystemOracle = DBSystemKey.String("oracle") ++ // IBM DB2 ++ DBSystemDB2 = DBSystemKey.String("db2") ++ // PostgreSQL ++ DBSystemPostgreSQL = DBSystemKey.String("postgresql") ++ // Amazon Redshift ++ DBSystemRedshift = DBSystemKey.String("redshift") ++ // Apache Hive ++ DBSystemHive = DBSystemKey.String("hive") ++ // Cloudscape ++ DBSystemCloudscape = DBSystemKey.String("cloudscape") ++ // HyperSQL DataBase ++ DBSystemHSQLDB = DBSystemKey.String("hsqldb") ++ // Progress Database ++ DBSystemProgress = DBSystemKey.String("progress") ++ // SAP MaxDB ++ DBSystemMaxDB = DBSystemKey.String("maxdb") ++ // SAP HANA ++ DBSystemHanaDB = DBSystemKey.String("hanadb") ++ // Ingres ++ DBSystemIngres = DBSystemKey.String("ingres") ++ // FirstSQL ++ DBSystemFirstSQL = DBSystemKey.String("firstsql") ++ // EnterpriseDB ++ DBSystemEDB = DBSystemKey.String("edb") ++ // InterSystems Caché ++ DBSystemCache = DBSystemKey.String("cache") ++ // Adabas (Adaptable Database System) ++ DBSystemAdabas = DBSystemKey.String("adabas") ++ // Firebird ++ DBSystemFirebird = DBSystemKey.String("firebird") ++ // Apache Derby ++ DBSystemDerby = DBSystemKey.String("derby") ++ // FileMaker ++ DBSystemFilemaker = DBSystemKey.String("filemaker") ++ // Informix ++ DBSystemInformix = DBSystemKey.String("informix") ++ // InstantDB ++ DBSystemInstantDB = DBSystemKey.String("instantdb") ++ // InterBase ++ DBSystemInterbase = DBSystemKey.String("interbase") ++ // MariaDB ++ DBSystemMariaDB = DBSystemKey.String("mariadb") ++ // Netezza ++ DBSystemNetezza = DBSystemKey.String("netezza") ++ // Pervasive PSQL ++ DBSystemPervasive = DBSystemKey.String("pervasive") ++ // PointBase ++ DBSystemPointbase = DBSystemKey.String("pointbase") ++ // SQLite ++ DBSystemSqlite = DBSystemKey.String("sqlite") ++ // Sybase ++ DBSystemSybase = DBSystemKey.String("sybase") ++ // Teradata ++ DBSystemTeradata = DBSystemKey.String("teradata") ++ // Vertica ++ DBSystemVertica = DBSystemKey.String("vertica") ++ // H2 ++ DBSystemH2 = DBSystemKey.String("h2") ++ // ColdFusion IMQ ++ DBSystemColdfusion = DBSystemKey.String("coldfusion") ++ // Apache Cassandra ++ DBSystemCassandra = DBSystemKey.String("cassandra") ++ // Apache HBase ++ DBSystemHBase = DBSystemKey.String("hbase") ++ // MongoDB ++ DBSystemMongoDB = DBSystemKey.String("mongodb") ++ // Redis ++ DBSystemRedis = DBSystemKey.String("redis") ++ // Couchbase ++ DBSystemCouchbase = DBSystemKey.String("couchbase") ++ // CouchDB ++ DBSystemCouchDB = DBSystemKey.String("couchdb") ++ // Microsoft Azure Cosmos DB ++ DBSystemCosmosDB = DBSystemKey.String("cosmosdb") ++ // Amazon DynamoDB ++ DBSystemDynamoDB = DBSystemKey.String("dynamodb") ++ // Neo4j ++ DBSystemNeo4j = DBSystemKey.String("neo4j") ++ // Apache Geode ++ DBSystemGeode = DBSystemKey.String("geode") ++ // Elasticsearch ++ DBSystemElasticsearch = DBSystemKey.String("elasticsearch") ++ // Memcached ++ DBSystemMemcached = DBSystemKey.String("memcached") ++ // CockroachDB ++ DBSystemCockroachdb = 
DBSystemKey.String("cockroachdb") ++ // OpenSearch ++ DBSystemOpensearch = DBSystemKey.String("opensearch") ++ // ClickHouse ++ DBSystemClickhouse = DBSystemKey.String("clickhouse") ++ // Cloud Spanner ++ DBSystemSpanner = DBSystemKey.String("spanner") ++ // Trino ++ DBSystemTrino = DBSystemKey.String("trino") ++) ++ ++// DBConnectionString returns an attribute KeyValue conforming to the ++// "db.connection_string" semantic conventions. It represents the connection ++// string used to connect to the database. It is recommended to remove embedded ++// credentials. ++func DBConnectionString(val string) attribute.KeyValue { ++ return DBConnectionStringKey.String(val) ++} ++ ++// DBUser returns an attribute KeyValue conforming to the "db.user" semantic ++// conventions. It represents the username for accessing the database. ++func DBUser(val string) attribute.KeyValue { ++ return DBUserKey.String(val) ++} ++ ++// DBJDBCDriverClassname returns an attribute KeyValue conforming to the ++// "db.jdbc.driver_classname" semantic conventions. It represents the ++// fully-qualified class name of the [Java Database Connectivity ++// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver ++// used to connect. ++func DBJDBCDriverClassname(val string) attribute.KeyValue { ++ return DBJDBCDriverClassnameKey.String(val) ++} ++ ++// DBName returns an attribute KeyValue conforming to the "db.name" semantic ++// conventions. It represents the this attribute is used to report the name of ++// the database being accessed. For commands that switch the database, this ++// should be set to the target database (even if the command fails). ++func DBName(val string) attribute.KeyValue { ++ return DBNameKey.String(val) ++} ++ ++// DBStatement returns an attribute KeyValue conforming to the ++// "db.statement" semantic conventions. It represents the database statement ++// being executed. ++func DBStatement(val string) attribute.KeyValue { ++ return DBStatementKey.String(val) ++} ++ ++// DBOperation returns an attribute KeyValue conforming to the ++// "db.operation" semantic conventions. It represents the name of the operation ++// being executed, e.g. the [MongoDB command ++// name](https://docs.mongodb.com/manual/reference/command/#database-operations) ++// such as `findAndModify`, or the SQL keyword. ++func DBOperation(val string) attribute.KeyValue { ++ return DBOperationKey.String(val) ++} ++ ++// Connection-level attributes for Microsoft SQL Server ++const ( ++ // DBMSSQLInstanceNameKey is the attribute Key conforming to the ++ // "db.mssql.instance_name" semantic conventions. It represents the ++ // Microsoft SQL Server [instance ++ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++ // connecting to. This name is used to determine the port of a named ++ // instance. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'MSSQLSERVER' ++ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer ++ // required (but still recommended if non-standard). ++ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ++) ++ ++// DBMSSQLInstanceName returns an attribute KeyValue conforming to the ++// "db.mssql.instance_name" semantic conventions. It represents the Microsoft ++// SQL Server [instance ++// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) ++// connecting to. 
This name is used to determine the port of a named instance. ++func DBMSSQLInstanceName(val string) attribute.KeyValue { ++ return DBMSSQLInstanceNameKey.String(val) ++} ++ ++// Call-level attributes for Cassandra ++const ( ++ // DBCassandraPageSizeKey is the attribute Key conforming to the ++ // "db.cassandra.page_size" semantic conventions. It represents the fetch ++ // size used for paging, i.e. how many rows will be returned at once. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 5000 ++ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") ++ ++ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the ++ // "db.cassandra.consistency_level" semantic conventions. It represents the ++ // consistency level of the query. Based on consistency values from ++ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") ++ ++ // DBCassandraTableKey is the attribute Key conforming to the ++ // "db.cassandra.table" semantic conventions. It represents the name of the ++ // primary table that the operation is acting upon, including the keyspace ++ // name (if applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'mytable' ++ // Note: This mirrors the db.sql.table attribute but references cassandra ++ // rather than sql. It is not recommended to attempt any client-side ++ // parsing of `db.statement` just to get this property, but it should be ++ // set if it is provided by the library being instrumented. If the ++ // operation is acting upon an anonymous table, or more than one table, ++ // this value MUST NOT be set. ++ DBCassandraTableKey = attribute.Key("db.cassandra.table") ++ ++ // DBCassandraIdempotenceKey is the attribute Key conforming to the ++ // "db.cassandra.idempotence" semantic conventions. It represents the ++ // whether or not the query is idempotent. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") ++ ++ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming ++ // to the "db.cassandra.speculative_execution_count" semantic conventions. ++ // It represents the number of times a query was speculatively executed. ++ // Not set or `0` if the query was not executed speculatively. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 0, 2 ++ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") ++ ++ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID ++ // of the coordinating node for a query. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' ++ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") ++ ++ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the ++ // "db.cassandra.coordinator.dc" semantic conventions. It represents the ++ // data center of the coordinating node for a query. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'us-west-2' ++ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ++) ++ ++var ( ++ // all ++ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") ++ // each_quorum ++ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") ++ // quorum ++ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") ++ // local_quorum ++ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") ++ // one ++ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") ++ // two ++ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") ++ // three ++ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") ++ // local_one ++ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") ++ // any ++ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") ++ // serial ++ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") ++ // local_serial ++ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ++) ++ ++// DBCassandraPageSize returns an attribute KeyValue conforming to the ++// "db.cassandra.page_size" semantic conventions. It represents the fetch size ++// used for paging, i.e. how many rows will be returned at once. ++func DBCassandraPageSize(val int) attribute.KeyValue { ++ return DBCassandraPageSizeKey.Int(val) ++} ++ ++// DBCassandraTable returns an attribute KeyValue conforming to the ++// "db.cassandra.table" semantic conventions. It represents the name of the ++// primary table that the operation is acting upon, including the keyspace name ++// (if applicable). ++func DBCassandraTable(val string) attribute.KeyValue { ++ return DBCassandraTableKey.String(val) ++} ++ ++// DBCassandraIdempotence returns an attribute KeyValue conforming to the ++// "db.cassandra.idempotence" semantic conventions. It represents the whether ++// or not the query is idempotent. ++func DBCassandraIdempotence(val bool) attribute.KeyValue { ++ return DBCassandraIdempotenceKey.Bool(val) ++} ++ ++// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue ++// conforming to the "db.cassandra.speculative_execution_count" semantic ++// conventions. It represents the number of times a query was speculatively ++// executed. Not set or `0` if the query was not executed speculatively. ++func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { ++ return DBCassandraSpeculativeExecutionCountKey.Int(val) ++} ++ ++// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of ++// the coordinating node for a query. ++func DBCassandraCoordinatorID(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorIDKey.String(val) ++} ++ ++// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the ++// "db.cassandra.coordinator.dc" semantic conventions. It represents the data ++// center of the coordinating node for a query. 
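// Aside (editorial illustration, not part of the vendored OpenTelemetry file):
// a minimal sketch of how a database client span might carry the general db.*
// attributes defined above. It assumes this file is published as
// go.opentelemetry.io/otel/semconv/v1.21.0 and uses the standard OpenTelemetry
// Go tracing API; queryUsers, runQuery, the tracer name, and the SQL text are
// hypothetical placeholders.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func queryUsers(ctx context.Context) {
	ctx, span := otel.Tracer("db-example").Start(ctx, "SELECT inventory.users")
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemPostgreSQL,    // db.system
		semconv.DBName("inventory"),   // db.name
		semconv.DBOperation("SELECT"), // db.operation
		semconv.DBStatement("SELECT id, name FROM users WHERE id = $1"), // db.statement
	)

	runQuery(ctx) // hypothetical query execution using the span's context
}

func runQuery(context.Context) {} // placeholder so the sketch is self-contained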
++func DBCassandraCoordinatorDC(val string) attribute.KeyValue { ++ return DBCassandraCoordinatorDCKey.String(val) ++} ++ ++// Call-level attributes for Redis ++const ( ++ // DBRedisDBIndexKey is the attribute Key conforming to the ++ // "db.redis.database_index" semantic conventions. It represents the index ++ // of the database being accessed as used in the [`SELECT` ++ // command](https://redis.io/commands/select), provided as an integer. To ++ // be used instead of the generic `db.name` attribute. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // database (`0`).) ++ // Stability: stable ++ // Examples: 0, 1, 15 ++ DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ++) ++ ++// DBRedisDBIndex returns an attribute KeyValue conforming to the ++// "db.redis.database_index" semantic conventions. It represents the index of ++// the database being accessed as used in the [`SELECT` ++// command](https://redis.io/commands/select), provided as an integer. To be ++// used instead of the generic `db.name` attribute. ++func DBRedisDBIndex(val int) attribute.KeyValue { ++ return DBRedisDBIndexKey.Int(val) ++} ++ ++// Call-level attributes for MongoDB ++const ( ++ // DBMongoDBCollectionKey is the attribute Key conforming to the ++ // "db.mongodb.collection" semantic conventions. It represents the ++ // collection being accessed within the database stated in `db.name`. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'customers', 'products' ++ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ++) ++ ++// DBMongoDBCollection returns an attribute KeyValue conforming to the ++// "db.mongodb.collection" semantic conventions. It represents the collection ++// being accessed within the database stated in `db.name`. ++func DBMongoDBCollection(val string) attribute.KeyValue { ++ return DBMongoDBCollectionKey.String(val) ++} ++ ++// Call-level attributes for SQL databases ++const ( ++ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" ++ // semantic conventions. It represents the name of the primary table that ++ // the operation is acting upon, including the database name (if ++ // applicable). ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'public.users', 'customers' ++ // Note: It is not recommended to attempt any client-side parsing of ++ // `db.statement` just to get this property, but it should be set if it is ++ // provided by the library being instrumented. If the operation is acting ++ // upon an anonymous table, or more than one table, this value MUST NOT be ++ // set. ++ DBSQLTableKey = attribute.Key("db.sql.table") ++) ++ ++// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" ++// semantic conventions. It represents the name of the primary table that the ++// operation is acting upon, including the database name (if applicable). ++func DBSQLTable(val string) attribute.KeyValue { ++ return DBSQLTableKey.String(val) ++} ++ ++// Call-level attributes for Cosmos DB. ++const ( ++ // DBCosmosDBClientIDKey is the attribute Key conforming to the ++ // "db.cosmosdb.client_id" semantic conventions. It represents the unique ++ // Cosmos client instance id. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' ++ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") ++ ++ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the ++ // "db.cosmosdb.operation_type" semantic conventions. It represents the ++ // cosmosDB Operation Type. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (when performing one of the ++ // operations in this list) ++ // Stability: stable ++ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") ++ ++ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the ++ // "db.cosmosdb.connection_mode" semantic conventions. It represents the ++ // cosmos client connection mode. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as ++ // default)) ++ // Stability: stable ++ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") ++ ++ // DBCosmosDBContainerKey is the attribute Key conforming to the ++ // "db.cosmosdb.container" semantic conventions. It represents the cosmos ++ // DB container name. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (if available) ++ // Stability: stable ++ // Examples: 'anystring' ++ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") ++ ++ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the ++ // "db.cosmosdb.request_content_length" semantic conventions. It represents ++ // the request payload size in bytes ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") ++ ++ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the ++ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos ++ // DB status code. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (if response was received) ++ // Stability: stable ++ // Examples: 200, 201 ++ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") ++ ++ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the ++ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the ++ // cosmos DB sub status code. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (when response was received and ++ // contained sub-code.) ++ // Stability: stable ++ // Examples: 1000, 1002 ++ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") ++ ++ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the ++ // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU ++ // consumed for that operation ++ // ++ // Type: double ++ // RequirementLevel: ConditionallyRequired (when available) ++ // Stability: stable ++ // Examples: 46.18, 1.0 ++ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") ++) ++ ++var ( ++ // invalid ++ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") ++ // create ++ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") ++ // patch ++ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") ++ // read ++ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") ++ // read_feed ++ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") ++ // delete ++ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") ++ // replace ++ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") ++ // execute ++ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") ++ // query ++ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") ++ // head ++ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") ++ // head_feed ++ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") ++ // upsert ++ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") ++ // batch ++ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") ++ // query_plan ++ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") ++ // execute_javascript ++ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") ++) ++ ++var ( ++ // Gateway (HTTP) connections mode ++ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") ++ // Direct connection ++ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") ++) ++ ++// DBCosmosDBClientID returns an attribute KeyValue conforming to the ++// "db.cosmosdb.client_id" semantic conventions. It represents the unique ++// Cosmos client instance id. ++func DBCosmosDBClientID(val string) attribute.KeyValue { ++ return DBCosmosDBClientIDKey.String(val) ++} ++ ++// DBCosmosDBContainer returns an attribute KeyValue conforming to the ++// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB ++// container name. ++func DBCosmosDBContainer(val string) attribute.KeyValue { ++ return DBCosmosDBContainerKey.String(val) ++} ++ ++// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming ++// to the "db.cosmosdb.request_content_length" semantic conventions. It ++// represents the request payload size in bytes ++func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { ++ return DBCosmosDBRequestContentLengthKey.Int(val) ++} ++ ++// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the ++// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB ++// status code. ++func DBCosmosDBStatusCode(val int) attribute.KeyValue { ++ return DBCosmosDBStatusCodeKey.Int(val) ++} ++ ++// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the ++// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos ++// DB sub status code. 
++func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { ++ return DBCosmosDBSubStatusCodeKey.Int(val) ++} ++ ++// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the ++// "db.cosmosdb.request_charge" semantic conventions. It represents the rU ++// consumed for that operation ++func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { ++ return DBCosmosDBRequestChargeKey.Float64(val) ++} ++ ++// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's ++// concepts. ++const ( ++ // OTelStatusCodeKey is the attribute Key conforming to the ++ // "otel.status_code" semantic conventions. It represents the name of the ++ // code, either "OK" or "ERROR". MUST NOT be set if the status code is ++ // UNSET. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ OTelStatusCodeKey = attribute.Key("otel.status_code") ++ ++ // OTelStatusDescriptionKey is the attribute Key conforming to the ++ // "otel.status_description" semantic conventions. It represents the ++ // description of the Status if it has a value, otherwise not set. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'resource not found' ++ OTelStatusDescriptionKey = attribute.Key("otel.status_description") ++) ++ ++var ( ++ // The operation has been validated by an Application developer or Operator to have completed successfully ++ OTelStatusCodeOk = OTelStatusCodeKey.String("OK") ++ // The operation contains an error ++ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ++) ++ ++// OTelStatusDescription returns an attribute KeyValue conforming to the ++// "otel.status_description" semantic conventions. It represents the ++// description of the Status if it has a value, otherwise not set. ++func OTelStatusDescription(val string) attribute.KeyValue { ++ return OTelStatusDescriptionKey.String(val) ++} ++ ++// This semantic convention describes an instance of a function that runs ++// without provisioning or managing of servers (also known as serverless ++// functions or Function as a Service (FaaS)) with spans. ++const ( ++ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" ++ // semantic conventions. It represents the type of the trigger which caused ++ // this function invocation. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Note: For the server/consumer span on the incoming side, ++ // `faas.trigger` MUST be set. ++ // ++ // Clients invoking FaaS instances usually cannot set `faas.trigger`, ++ // since they would typically need to look in the payload to determine ++ // the event type. If clients set it, it should be the same as the ++ // trigger that corresponding incoming would have (i.e., this has ++ // nothing to do with the underlying transport used to make the API ++ // call to invoke the lambda, which is often HTTP). ++ FaaSTriggerKey = attribute.Key("faas.trigger") ++ ++ // FaaSInvocationIDKey is the attribute Key conforming to the ++ // "faas.invocation_id" semantic conventions. It represents the invocation ++ // ID of the current function invocation. 
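// Aside (editorial illustration, not part of the vendored file): the
// otel.status_code / otel.status_description pair above is normally derived by
// exporters from the span status, so instrumentation sets the status rather
// than these attributes directly. A sketch under that assumption, combined
// with the faas.* trigger attributes; handleScheduledRun, doWork, the tracer
// name, and the semconv v1.21.0 import path are assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

func handleScheduledRun(ctx context.Context, invocationID string) {
	ctx, span := otel.Tracer("faas-example").Start(ctx, "scheduled-run",
		trace.WithAttributes(
			semconv.FaaSTriggerTimer,               // faas.trigger=timer
			semconv.FaaSInvocationID(invocationID), // faas.invocation_id
		),
	)
	defer span.End()

	if err := doWork(ctx); err != nil {
		span.SetStatus(codes.Error, err.Error()) // surfaces as otel.status_code=ERROR
		return
	}
	span.SetStatus(codes.Ok, "") // surfaces as otel.status_code=OK
}

func doWork(context.Context) error { return nil } // placeholder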
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' ++ FaaSInvocationIDKey = attribute.Key("faas.invocation_id") ++) ++ ++var ( ++ // A response to some data source operation such as a database or filesystem read/write ++ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") ++ // To provide an answer to an inbound HTTP request ++ FaaSTriggerHTTP = FaaSTriggerKey.String("http") ++ // A function is set to be executed when messages are sent to a messaging system ++ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") ++ // A function is scheduled to be executed regularly ++ FaaSTriggerTimer = FaaSTriggerKey.String("timer") ++ // If none of the others apply ++ FaaSTriggerOther = FaaSTriggerKey.String("other") ++) ++ ++// FaaSInvocationID returns an attribute KeyValue conforming to the ++// "faas.invocation_id" semantic conventions. It represents the invocation ID ++// of the current function invocation. ++func FaaSInvocationID(val string) attribute.KeyValue { ++ return FaaSInvocationIDKey.String(val) ++} ++ ++// Semantic Convention for FaaS triggered as a response to some data source ++// operation such as a database or filesystem read/write. ++const ( ++ // FaaSDocumentCollectionKey is the attribute Key conforming to the ++ // "faas.document.collection" semantic conventions. It represents the name ++ // of the source on which the triggering operation was performed. For ++ // example, in Cloud Storage or S3 corresponds to the bucket name, and in ++ // Cosmos DB to the database name. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'myBucketName', 'myDBName' ++ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") ++ ++ // FaaSDocumentOperationKey is the attribute Key conforming to the ++ // "faas.document.operation" semantic conventions. It represents the ++ // describes the type of the operation that was performed on the data. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ FaaSDocumentOperationKey = attribute.Key("faas.document.operation") ++ ++ // FaaSDocumentTimeKey is the attribute Key conforming to the ++ // "faas.document.time" semantic conventions. It represents a string ++ // containing the time when the data was accessed in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSDocumentTimeKey = attribute.Key("faas.document.time") ++ ++ // FaaSDocumentNameKey is the attribute Key conforming to the ++ // "faas.document.name" semantic conventions. It represents the document ++ // name/table subjected to the operation. For example, in Cloud Storage or ++ // S3 is the name of the file, and in Cosmos DB the table name. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'myFile.txt', 'myTableName' ++ FaaSDocumentNameKey = attribute.Key("faas.document.name") ++) ++ ++var ( ++ // When a new object is created ++ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") ++ // When an object is modified ++ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") ++ // When an object is deleted ++ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ++) ++ ++// FaaSDocumentCollection returns an attribute KeyValue conforming to the ++// "faas.document.collection" semantic conventions. It represents the name of ++// the source on which the triggering operation was performed. For example, in ++// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the ++// database name. ++func FaaSDocumentCollection(val string) attribute.KeyValue { ++ return FaaSDocumentCollectionKey.String(val) ++} ++ ++// FaaSDocumentTime returns an attribute KeyValue conforming to the ++// "faas.document.time" semantic conventions. It represents a string containing ++// the time when the data was accessed in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSDocumentTime(val string) attribute.KeyValue { ++ return FaaSDocumentTimeKey.String(val) ++} ++ ++// FaaSDocumentName returns an attribute KeyValue conforming to the ++// "faas.document.name" semantic conventions. It represents the document ++// name/table subjected to the operation. For example, in Cloud Storage or S3 ++// is the name of the file, and in Cosmos DB the table name. ++func FaaSDocumentName(val string) attribute.KeyValue { ++ return FaaSDocumentNameKey.String(val) ++} ++ ++// Semantic Convention for FaaS scheduled to be executed regularly. ++const ( ++ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic ++ // conventions. It represents a string containing the function invocation ++ // time in the [ISO ++ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '2020-01-23T13:47:06Z' ++ FaaSTimeKey = attribute.Key("faas.time") ++ ++ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic ++ // conventions. It represents a string containing the schedule period as ++ // [Cron ++ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '0/5 * * * ? *' ++ FaaSCronKey = attribute.Key("faas.cron") ++) ++ ++// FaaSTime returns an attribute KeyValue conforming to the "faas.time" ++// semantic conventions. It represents a string containing the function ++// invocation time in the [ISO ++// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format ++// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). ++func FaaSTime(val string) attribute.KeyValue { ++ return FaaSTimeKey.String(val) ++} ++ ++// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" ++// semantic conventions. It represents a string containing the schedule period ++// as [Cron ++// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). 
++func FaaSCron(val string) attribute.KeyValue { ++ return FaaSCronKey.String(val) ++} ++ ++// Contains additional attributes for incoming FaaS spans. ++const ( ++ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" ++ // semantic conventions. It represents a boolean that is true if the ++ // serverless function is executed for the first time (aka cold-start). ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ FaaSColdstartKey = attribute.Key("faas.coldstart") ++) ++ ++// FaaSColdstart returns an attribute KeyValue conforming to the ++// "faas.coldstart" semantic conventions. It represents a boolean that is true ++// if the serverless function is executed for the first time (aka cold-start). ++func FaaSColdstart(val bool) attribute.KeyValue { ++ return FaaSColdstartKey.Bool(val) ++} ++ ++// Contains additional attributes for outgoing FaaS spans. ++const ( ++ // FaaSInvokedNameKey is the attribute Key conforming to the ++ // "faas.invoked_name" semantic conventions. It represents the name of the ++ // invoked function. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'my-function' ++ // Note: SHOULD be equal to the `faas.name` resource attribute of the ++ // invoked function. ++ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") ++ ++ // FaaSInvokedProviderKey is the attribute Key conforming to the ++ // "faas.invoked_provider" semantic conventions. It represents the cloud ++ // provider of the invoked function. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the ++ // invoked function. ++ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") ++ ++ // FaaSInvokedRegionKey is the attribute Key conforming to the ++ // "faas.invoked_region" semantic conventions. It represents the cloud ++ // region of the invoked function. ++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (For some cloud providers, like ++ // AWS or GCP, the region in which a function is hosted is essential to ++ // uniquely identify the function and also part of its endpoint. Since it's ++ // part of the endpoint being called, the region is always known to ++ // clients. In these cases, `faas.invoked_region` MUST be set accordingly. ++ // If the region is unknown to the client or not required for identifying ++ // the invoked function, setting `faas.invoked_region` is optional.) ++ // Stability: stable ++ // Examples: 'eu-central-1' ++ // Note: SHOULD be equal to the `cloud.region` resource attribute of the ++ // invoked function. ++ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ++) ++ ++var ( ++ // Alibaba Cloud ++ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") ++ // Amazon Web Services ++ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") ++ // Microsoft Azure ++ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") ++ // Google Cloud Platform ++ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ++ // Tencent Cloud ++ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ++) ++ ++// FaaSInvokedName returns an attribute KeyValue conforming to the ++// "faas.invoked_name" semantic conventions. It represents the name of the ++// invoked function. 
++func FaaSInvokedName(val string) attribute.KeyValue { ++ return FaaSInvokedNameKey.String(val) ++} ++ ++// FaaSInvokedRegion returns an attribute KeyValue conforming to the ++// "faas.invoked_region" semantic conventions. It represents the cloud region ++// of the invoked function. ++func FaaSInvokedRegion(val string) attribute.KeyValue { ++ return FaaSInvokedRegionKey.String(val) ++} ++ ++// Operations that access some remote service. ++const ( ++ // PeerServiceKey is the attribute Key conforming to the "peer.service" ++ // semantic conventions. It represents the ++ // [`service.name`](/docs/resource/README.md#service) of the remote ++ // service. SHOULD be equal to the actual `service.name` resource attribute ++ // of the remote service if any. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'AuthTokenCache' ++ PeerServiceKey = attribute.Key("peer.service") ++) ++ ++// PeerService returns an attribute KeyValue conforming to the ++// "peer.service" semantic conventions. It represents the ++// [`service.name`](/docs/resource/README.md#service) of the remote service. ++// SHOULD be equal to the actual `service.name` resource attribute of the ++// remote service if any. ++func PeerService(val string) attribute.KeyValue { ++ return PeerServiceKey.String(val) ++} ++ ++// These attributes may be used for any operation with an authenticated and/or ++// authorized enduser. ++const ( ++ // EnduserIDKey is the attribute Key conforming to the "enduser.id" ++ // semantic conventions. It represents the username or client_id extracted ++ // from the access token or ++ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header ++ // in the inbound request from outside the system. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'username' ++ EnduserIDKey = attribute.Key("enduser.id") ++ ++ // EnduserRoleKey is the attribute Key conforming to the "enduser.role" ++ // semantic conventions. It represents the actual/assumed role the client ++ // is making the request under extracted from token or application security ++ // context. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'admin' ++ EnduserRoleKey = attribute.Key("enduser.role") ++ ++ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" ++ // semantic conventions. It represents the scopes or granted authorities ++ // the client currently possesses extracted from token or application ++ // security context. The value would come from the scope associated with an ++ // [OAuth 2.0 Access ++ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++ // value in a [SAML 2.0 ++ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'read:message, write:files' ++ EnduserScopeKey = attribute.Key("enduser.scope") ++) ++ ++// EnduserID returns an attribute KeyValue conforming to the "enduser.id" ++// semantic conventions. It represents the username or client_id extracted from ++// the access token or ++// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in ++// the inbound request from outside the system. 
++func EnduserID(val string) attribute.KeyValue { ++ return EnduserIDKey.String(val) ++} ++ ++// EnduserRole returns an attribute KeyValue conforming to the ++// "enduser.role" semantic conventions. It represents the actual/assumed role ++// the client is making the request under extracted from token or application ++// security context. ++func EnduserRole(val string) attribute.KeyValue { ++ return EnduserRoleKey.String(val) ++} ++ ++// EnduserScope returns an attribute KeyValue conforming to the ++// "enduser.scope" semantic conventions. It represents the scopes or granted ++// authorities the client currently possesses extracted from token or ++// application security context. The value would come from the scope associated ++// with an [OAuth 2.0 Access ++// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute ++// value in a [SAML 2.0 ++// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). ++func EnduserScope(val string) attribute.KeyValue { ++ return EnduserScopeKey.String(val) ++} ++ ++// These attributes may be used for any operation to store information about a ++// thread that started a span. ++const ( ++ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic ++ // conventions. It represents the current "managed" thread ID (as opposed ++ // to OS thread ID). ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ ThreadIDKey = attribute.Key("thread.id") ++ ++ // ThreadNameKey is the attribute Key conforming to the "thread.name" ++ // semantic conventions. It represents the current thread name. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'main' ++ ThreadNameKey = attribute.Key("thread.name") ++) ++ ++// ThreadID returns an attribute KeyValue conforming to the "thread.id" ++// semantic conventions. It represents the current "managed" thread ID (as ++// opposed to OS thread ID). ++func ThreadID(val int) attribute.KeyValue { ++ return ThreadIDKey.Int(val) ++} ++ ++// ThreadName returns an attribute KeyValue conforming to the "thread.name" ++// semantic conventions. It represents the current thread name. ++func ThreadName(val string) attribute.KeyValue { ++ return ThreadNameKey.String(val) ++} ++ ++// These attributes allow to report this unit of code and therefore to provide ++// more context about the span. ++const ( ++ // CodeFunctionKey is the attribute Key conforming to the "code.function" ++ // semantic conventions. It represents the method or function name, or ++ // equivalent (usually rightmost part of the code unit's name). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'serveRequest' ++ CodeFunctionKey = attribute.Key("code.function") ++ ++ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" ++ // semantic conventions. It represents the "namespace" within which ++ // `code.function` is defined. Usually the qualified class or module name, ++ // such that `code.namespace` + some separator + `code.function` form a ++ // unique identifier for the code unit. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'com.example.MyHTTPService' ++ CodeNamespaceKey = attribute.Key("code.namespace") ++ ++ // CodeFilepathKey is the attribute Key conforming to the "code.filepath" ++ // semantic conventions. 
It represents the source code file name that ++ // identifies the code unit as uniquely as possible (preferably an absolute ++ // file path). ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '/usr/local/MyApplication/content_root/app/index.php' ++ CodeFilepathKey = attribute.Key("code.filepath") ++ ++ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" ++ // semantic conventions. It represents the line number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 42 ++ CodeLineNumberKey = attribute.Key("code.lineno") ++ ++ // CodeColumnKey is the attribute Key conforming to the "code.column" ++ // semantic conventions. It represents the column number in `code.filepath` ++ // best representing the operation. It SHOULD point within the code unit ++ // named in `code.function`. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 16 ++ CodeColumnKey = attribute.Key("code.column") ++) ++ ++// CodeFunction returns an attribute KeyValue conforming to the ++// "code.function" semantic conventions. It represents the method or function ++// name, or equivalent (usually rightmost part of the code unit's name). ++func CodeFunction(val string) attribute.KeyValue { ++ return CodeFunctionKey.String(val) ++} ++ ++// CodeNamespace returns an attribute KeyValue conforming to the ++// "code.namespace" semantic conventions. It represents the "namespace" within ++// which `code.function` is defined. Usually the qualified class or module ++// name, such that `code.namespace` + some separator + `code.function` form a ++// unique identifier for the code unit. ++func CodeNamespace(val string) attribute.KeyValue { ++ return CodeNamespaceKey.String(val) ++} ++ ++// CodeFilepath returns an attribute KeyValue conforming to the ++// "code.filepath" semantic conventions. It represents the source code file ++// name that identifies the code unit as uniquely as possible (preferably an ++// absolute file path). ++func CodeFilepath(val string) attribute.KeyValue { ++ return CodeFilepathKey.String(val) ++} ++ ++// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" ++// semantic conventions. It represents the line number in `code.filepath` best ++// representing the operation. It SHOULD point within the code unit named in ++// `code.function`. ++func CodeLineNumber(val int) attribute.KeyValue { ++ return CodeLineNumberKey.Int(val) ++} ++ ++// CodeColumn returns an attribute KeyValue conforming to the "code.column" ++// semantic conventions. It represents the column number in `code.filepath` ++// best representing the operation. It SHOULD point within the code unit named ++// in `code.function`. ++func CodeColumn(val int) attribute.KeyValue { ++ return CodeColumnKey.Int(val) ++} ++ ++// Semantic Convention for HTTP Client ++const ( ++ // HTTPResendCountKey is the attribute Key conforming to the ++ // "http.resend_count" semantic conventions. It represents the ordinal ++ // number of request resending attempt (for any reason, including ++ // redirects). ++ // ++ // Type: int ++ // RequirementLevel: Recommended (if and only if request was retried.) 
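// Aside (editorial illustration, not part of the vendored file): one way the
// code.* attributes above could be filled in from the Go runtime.
// codeAttributes is a hypothetical helper, and the semconv v1.21.0 import path
// is an assumption; a caller might pass the result to span.SetAttributes(...).
package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)

func codeAttributes() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1) // 1 = the caller of codeAttributes
	if !ok {
		return nil
	}
	attrs := []attribute.KeyValue{
		semconv.CodeFilepath(file),   // code.filepath
		semconv.CodeLineNumber(line), // code.lineno
	}
	if fn := runtime.FuncForPC(pc); fn != nil {
		// fn.Name() is fully qualified (e.g. "example.com/pkg.handler") and is
		// used as-is for code.function in this sketch.
		attrs = append(attrs, semconv.CodeFunction(fn.Name()))
	}
	return attrs
}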
++ // Stability: stable ++ // Examples: 3 ++ // Note: The resend count SHOULD be updated each time an HTTP request gets ++ // resent by the client, regardless of what was the cause of the resending ++ // (e.g. redirection, authorization failure, 503 Server Unavailable, ++ // network issues, or any other). ++ HTTPResendCountKey = attribute.Key("http.resend_count") ++) ++ ++// HTTPResendCount returns an attribute KeyValue conforming to the ++// "http.resend_count" semantic conventions. It represents the ordinal number ++// of request resending attempt (for any reason, including redirects). ++func HTTPResendCount(val int) attribute.KeyValue { ++ return HTTPResendCountKey.Int(val) ++} ++ ++// The `aws` conventions apply to operations using the AWS SDK. They map ++// request or response parameters in AWS SDK API calls to attributes on a Span. ++// The conventions have been collected over time based on feedback from AWS ++// users of tracing and will continue to evolve as new interesting conventions ++// are found. ++// Some descriptions are also provided for populating general OpenTelemetry ++// semantic conventions based on these APIs. ++const ( ++ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" ++ // semantic conventions. It represents the AWS request ID as returned in ++ // the response headers `x-amz-request-id` or `x-amz-requestid`. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' ++ AWSRequestIDKey = attribute.Key("aws.request_id") ++) ++ ++// AWSRequestID returns an attribute KeyValue conforming to the ++// "aws.request_id" semantic conventions. It represents the AWS request ID as ++// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. ++func AWSRequestID(val string) attribute.KeyValue { ++ return AWSRequestIDKey.String(val) ++} ++ ++// Attributes that exist for multiple DynamoDB request types. ++const ( ++ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_names" semantic conventions. It represents the keys ++ // in the `RequestItems` object field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'Cats' ++ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") ++ ++ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the ++ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++ // JSON-serialized value of each item in the `ConsumedCapacity` response ++ // field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { ++ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": ++ // { "CapacityUnits": number, "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number }, "TableName": "string", ++ // "WriteCapacityUnits": number }' ++ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") ++ ++ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to ++ // the "aws.dynamodb.item_collection_metrics" semantic conventions. 
It ++ // represents the JSON-serialized value of the `ItemCollectionMetrics` ++ // response field. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": ++ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { ++ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], ++ // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, ++ // "SizeEstimateRangeGB": [ number ] } ] }' ++ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") ++ ++ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to ++ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It ++ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` ++ // request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") ++ ++ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming ++ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. ++ // It represents the value of the ++ // `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++ // ++ // Type: double ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 1.0, 2.0 ++ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") ++ ++ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the ++ // "aws.dynamodb.consistent_read" semantic conventions. It represents the ++ // value of the `ConsistentRead` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") ++ ++ // AWSDynamoDBProjectionKey is the attribute Key conforming to the ++ // "aws.dynamodb.projection" semantic conventions. It represents the value ++ // of the `ProjectionExpression` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Title', 'Title, Price, Color', 'Title, Description, ++ // RelatedItems, ProductReviews' ++ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") ++ ++ // AWSDynamoDBLimitKey is the attribute Key conforming to the ++ // "aws.dynamodb.limit" semantic conventions. It represents the value of ++ // the `Limit` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") ++ ++ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the ++ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the ++ // value of the `AttributesToGet` request parameter. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'lives', 'id' ++ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") ++ ++ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the ++ // "aws.dynamodb.index_name" semantic conventions. It represents the value ++ // of the `IndexName` request parameter. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'name_to_group' ++ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") ++ ++ // AWSDynamoDBSelectKey is the attribute Key conforming to the ++ // "aws.dynamodb.select" semantic conventions. It represents the value of ++ // the `Select` request parameter. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'ALL_ATTRIBUTES', 'COUNT' ++ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ++) ++ ++// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_names" semantic conventions. It represents the keys in ++// the `RequestItems` object field. ++func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { ++ return AWSDynamoDBTableNamesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to ++// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the ++// JSON-serialized value of each item in the `ConsumedCapacity` response field. ++func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { ++ return AWSDynamoDBConsumedCapacityKey.StringSlice(val) ++} ++ ++// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming ++// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It ++// represents the JSON-serialized value of the `ItemCollectionMetrics` response ++// field. ++func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { ++ return AWSDynamoDBItemCollectionMetricsKey.String(val) ++} ++ ++// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.ReadCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue ++// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic ++// conventions. It represents the value of the ++// `ProvisionedThroughput.WriteCapacityUnits` request parameter. ++func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { ++ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) ++} ++ ++// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the ++// "aws.dynamodb.consistent_read" semantic conventions. It represents the value ++// of the `ConsistentRead` request parameter. ++func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { ++ return AWSDynamoDBConsistentReadKey.Bool(val) ++} ++ ++// AWSDynamoDBProjection returns an attribute KeyValue conforming to the ++// "aws.dynamodb.projection" semantic conventions. It represents the value of ++// the `ProjectionExpression` request parameter. ++func AWSDynamoDBProjection(val string) attribute.KeyValue { ++ return AWSDynamoDBProjectionKey.String(val) ++} ++ ++// AWSDynamoDBLimit returns an attribute KeyValue conforming to the ++// "aws.dynamodb.limit" semantic conventions. It represents the value of the ++// `Limit` request parameter. ++func AWSDynamoDBLimit(val int) attribute.KeyValue { ++ return AWSDynamoDBLimitKey.Int(val) ++} ++ ++// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to ++// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the ++// value of the `AttributesToGet` request parameter. ++func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributesToGetKey.StringSlice(val) ++} ++ ++// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the ++// "aws.dynamodb.index_name" semantic conventions. It represents the value of ++// the `IndexName` request parameter. ++func AWSDynamoDBIndexName(val string) attribute.KeyValue { ++ return AWSDynamoDBIndexNameKey.String(val) ++} ++ ++// AWSDynamoDBSelect returns an attribute KeyValue conforming to the ++// "aws.dynamodb.select" semantic conventions. It represents the value of the ++// `Select` request parameter. ++func AWSDynamoDBSelect(val string) attribute.KeyValue { ++ return AWSDynamoDBSelectKey.String(val) ++} ++ ++// DynamoDB.CreateTable ++const ( ++ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `GlobalSecondaryIndexes` request field ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": ++ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ ++ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { ++ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") ++ ++ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to ++ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++ // represents the JSON-serialized value of each item of the ++ // `LocalSecondaryIndexes` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "IndexARN": "string", "IndexName": "string", ++ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' ++ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ++) ++ ++// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_indexes" semantic ++// conventions. It represents the JSON-serialized value of each item of the ++// `GlobalSecondaryIndexes` request field ++func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming ++// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It ++// represents the JSON-serialized value of each item of the ++// `LocalSecondaryIndexes` request field. ++func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { ++ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) ++} ++ ++// DynamoDB.ListTables ++const ( ++ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the ++ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents ++ // the value of the `ExclusiveStartTableName` request parameter. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Users', 'CatsTable' ++ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") ++ ++ // AWSDynamoDBTableCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.table_count" semantic conventions. It represents the the ++ // number of items in the `TableNames` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 20 ++ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ++) ++ ++// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming ++// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It ++// represents the value of the `ExclusiveStartTableName` request parameter. ++func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { ++ return AWSDynamoDBExclusiveStartTableKey.String(val) ++} ++ ++// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.table_count" semantic conventions. It represents the the ++// number of items in the `TableNames` response parameter. ++func AWSDynamoDBTableCount(val int) attribute.KeyValue { ++ return AWSDynamoDBTableCountKey.Int(val) ++} ++ ++// DynamoDB.Query ++const ( ++ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the ++ // "aws.dynamodb.scan_forward" semantic conventions. It represents the ++ // value of the `ScanIndexForward` request parameter. ++ // ++ // Type: boolean ++ // RequirementLevel: Optional ++ // Stability: stable ++ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ++) ++ ++// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of ++// the `ScanIndexForward` request parameter. ++func AWSDynamoDBScanForward(val bool) attribute.KeyValue { ++ return AWSDynamoDBScanForwardKey.Bool(val) ++} ++ ++// DynamoDB.Scan ++const ( ++ // AWSDynamoDBSegmentKey is the attribute Key conforming to the ++ // "aws.dynamodb.segment" semantic conventions. It represents the value of ++ // the `Segment` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") ++ ++ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the ++ // "aws.dynamodb.total_segments" semantic conventions. It represents the ++ // value of the `TotalSegments` request parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 100 ++ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") ++ ++ // AWSDynamoDBCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.count" semantic conventions. It represents the value of ++ // the `Count` response parameter. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 10 ++ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") ++ ++ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the ++ // "aws.dynamodb.scanned_count" semantic conventions. It represents the ++ // value of the `ScannedCount` response parameter. 
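// Aside (editorial illustration, not part of the vendored file): a sketch of a
// client span around a DynamoDB Query call, combining db.* with the
// aws.dynamodb.* attributes above. traceDynamoDBQuery, runDynamoDBQuery, the
// tracer name, the table and index values, and the semconv v1.21.0 import path
// are assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

func traceDynamoDBQuery(ctx context.Context) {
	ctx, span := otel.Tracer("aws-example").Start(ctx, "DynamoDB.Query",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemDynamoDB,                      // db.system
		semconv.DBOperation("Query"),                  // db.operation
		semconv.AWSDynamoDBTableNames("Users"),        // aws.dynamodb.table_names
		semconv.AWSDynamoDBIndexName("name_to_group"), // aws.dynamodb.index_name
		semconv.AWSDynamoDBLimit(10),                  // aws.dynamodb.limit
		semconv.AWSDynamoDBScanForward(true),          // aws.dynamodb.scan_forward
	)

	runDynamoDBQuery(ctx) // hypothetical call into the AWS SDK
}

func runDynamoDBQuery(context.Context) {} // placeholder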
++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 50 ++ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ++) ++ ++// AWSDynamoDBSegment returns an attribute KeyValue conforming to the ++// "aws.dynamodb.segment" semantic conventions. It represents the value of the ++// `Segment` request parameter. ++func AWSDynamoDBSegment(val int) attribute.KeyValue { ++ return AWSDynamoDBSegmentKey.Int(val) ++} ++ ++// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the ++// "aws.dynamodb.total_segments" semantic conventions. It represents the value ++// of the `TotalSegments` request parameter. ++func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { ++ return AWSDynamoDBTotalSegmentsKey.Int(val) ++} ++ ++// AWSDynamoDBCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.count" semantic conventions. It represents the value of the ++// `Count` response parameter. ++func AWSDynamoDBCount(val int) attribute.KeyValue { ++ return AWSDynamoDBCountKey.Int(val) ++} ++ ++// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the ++// "aws.dynamodb.scanned_count" semantic conventions. It represents the value ++// of the `ScannedCount` response parameter. ++func AWSDynamoDBScannedCount(val int) attribute.KeyValue { ++ return AWSDynamoDBScannedCountKey.Int(val) ++} ++ ++// DynamoDB.UpdateTable ++const ( ++ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to ++ // the "aws.dynamodb.attribute_definitions" semantic conventions. It ++ // represents the JSON-serialized value of each item in the ++ // `AttributeDefinitions` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' ++ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") ++ ++ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key ++ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++ // conventions. It represents the JSON-serialized value of each item in the ++ // the `GlobalSecondaryIndexUpdates` request field. ++ // ++ // Type: string[] ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { ++ // "AttributeName": "string", "KeyType": "string" } ], "Projection": { ++ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, ++ // "ProvisionedThroughput": { "ReadCapacityUnits": number, ++ // "WriteCapacityUnits": number } }' ++ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ++) ++ ++// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming ++// to the "aws.dynamodb.attribute_definitions" semantic conventions. It ++// represents the JSON-serialized value of each item in the ++// `AttributeDefinitions` request field. ++func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { ++ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) ++} ++ ++// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue ++// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic ++// conventions. It represents the JSON-serialized value of each item in the the ++// `GlobalSecondaryIndexUpdates` request field. 
++func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { ++ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) ++} ++ ++// Attributes that exist for S3 request types. ++const ( ++ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" ++ // semantic conventions. It represents the S3 bucket name the request ++ // refers to. Corresponds to the `--bucket` parameter of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // operations. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'some-bucket-name' ++ // Note: The `bucket` attribute is applicable to all S3 operations that ++ // reference a bucket, i.e. that require the bucket name as a mandatory ++ // parameter. ++ // This applies to almost all S3 operations except `list-buckets`. ++ AWSS3BucketKey = attribute.Key("aws.s3.bucket") ++ ++ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic ++ // conventions. It represents the S3 object key the request refers to. ++ // Corresponds to the `--key` parameter of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // operations. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'someFile.yml' ++ // Note: The `key` attribute is applicable to all object-related S3 ++ // operations, i.e. that require the object key as a mandatory parameter. ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) ++ // - ++ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) ++ // - ++ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) ++ // - ++ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) ++ // - ++ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) ++ // - ++ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) ++ // - ++ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) ++ // - ++ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) ++ // - ++ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) ++ // - ++ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) ++ // - ++ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) ++ // - ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3KeyKey = attribute.Key("aws.s3.key") ++ ++ // AWSS3CopySourceKey is the attribute Key conforming to the ++ // "aws.s3.copy_source" semantic conventions. It represents the source ++ // object (in the form `bucket`/`key`) for the copy operation. 
++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'someFile.yml' ++ // Note: The `copy_source` attribute applies to S3 copy operations and ++ // corresponds to the `--copy-source` parameter ++ // of the [copy-object operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") ++ ++ // AWSS3UploadIDKey is the attribute Key conforming to the ++ // "aws.s3.upload_id" semantic conventions. It represents the upload ID ++ // that identifies the multipart upload. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' ++ // Note: The `upload_id` attribute applies to S3 multipart-upload ++ // operations and corresponds to the `--upload-id` parameter ++ // of the [S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++ // multipart operations. ++ // This applies in particular to the following operations: ++ // ++ // - ++ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) ++ // - ++ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) ++ // - ++ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) ++ // - ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // - ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") ++ ++ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" ++ // semantic conventions. It represents the delete request container that ++ // specifies the objects to be deleted. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: ++ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' ++ // Note: The `delete` attribute is only applicable to the ++ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) ++ // operation. ++ // The `delete` attribute corresponds to the `--delete` parameter of the ++ // [delete-objects operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). ++ AWSS3DeleteKey = attribute.Key("aws.s3.delete") ++ ++ // AWSS3PartNumberKey is the attribute Key conforming to the ++ // "aws.s3.part_number" semantic conventions. It represents the part number ++ // of the part being uploaded in a multipart-upload operation. This is a ++ // positive integer between 1 and 10,000. ++ // ++ // Type: int ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 3456 ++ // Note: The `part_number` attribute is only applicable to the ++ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) ++ // and ++ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) ++ // operations. 
++ // The `part_number` attribute corresponds to the `--part-number` parameter ++ // of the ++ // [upload-part operation within the S3 ++ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). ++ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") ++) ++ ++// AWSS3Bucket returns an attribute KeyValue conforming to the ++// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the ++// request refers to. Corresponds to the `--bucket` parameter of the [S3 ++// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++// operations. ++func AWSS3Bucket(val string) attribute.KeyValue { ++ return AWSS3BucketKey.String(val) ++} ++ ++// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" ++// semantic conventions. It represents the S3 object key the request refers to. ++// Corresponds to the `--key` parameter of the [S3 ++// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) ++// operations. ++func AWSS3Key(val string) attribute.KeyValue { ++ return AWSS3KeyKey.String(val) ++} ++ ++// AWSS3CopySource returns an attribute KeyValue conforming to the ++// "aws.s3.copy_source" semantic conventions. It represents the source object ++// (in the form `bucket`/`key`) for the copy operation. ++func AWSS3CopySource(val string) attribute.KeyValue { ++ return AWSS3CopySourceKey.String(val) ++} ++ ++// AWSS3UploadID returns an attribute KeyValue conforming to the ++// "aws.s3.upload_id" semantic conventions. It represents the upload ID that ++// identifies the multipart upload. ++func AWSS3UploadID(val string) attribute.KeyValue { ++ return AWSS3UploadIDKey.String(val) ++} ++ ++// AWSS3Delete returns an attribute KeyValue conforming to the ++// "aws.s3.delete" semantic conventions. It represents the delete request ++// container that specifies the objects to be deleted. ++func AWSS3Delete(val string) attribute.KeyValue { ++ return AWSS3DeleteKey.String(val) ++} ++ ++// AWSS3PartNumber returns an attribute KeyValue conforming to the ++// "aws.s3.part_number" semantic conventions. It represents the part number of ++// the part being uploaded in a multipart-upload operation. This is a positive ++// integer between 1 and 10,000. ++func AWSS3PartNumber(val int) attribute.KeyValue { ++ return AWSS3PartNumberKey.Int(val) ++} ++ ++// Semantic conventions to apply when instrumenting the GraphQL implementation. ++// They map GraphQL operations to attributes on a Span. ++const ( ++ // GraphqlOperationNameKey is the attribute Key conforming to the ++ // "graphql.operation.name" semantic conventions. It represents the name of ++ // the operation being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'findBookByID' ++ GraphqlOperationNameKey = attribute.Key("graphql.operation.name") ++ ++ // GraphqlOperationTypeKey is the attribute Key conforming to the ++ // "graphql.operation.type" semantic conventions. It represents the type of ++ // the operation being executed. ++ // ++ // Type: Enum ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query', 'mutation', 'subscription' ++ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") ++ ++ // GraphqlDocumentKey is the attribute Key conforming to the ++ // "graphql.document" semantic conventions. It represents the GraphQL ++ // document being executed. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'query findBookByID { bookByID(id: ?) 
{ name } }' ++ // Note: The value may be sanitized to exclude sensitive information. ++ GraphqlDocumentKey = attribute.Key("graphql.document") ++) ++ ++var ( ++ // GraphQL query ++ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") ++ // GraphQL mutation ++ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") ++ // GraphQL subscription ++ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ++) ++ ++// GraphqlOperationName returns an attribute KeyValue conforming to the ++// "graphql.operation.name" semantic conventions. It represents the name of the ++// operation being executed. ++func GraphqlOperationName(val string) attribute.KeyValue { ++ return GraphqlOperationNameKey.String(val) ++} ++ ++// GraphqlDocument returns an attribute KeyValue conforming to the ++// "graphql.document" semantic conventions. It represents the GraphQL document ++// being executed. ++func GraphqlDocument(val string) attribute.KeyValue { ++ return GraphqlDocumentKey.String(val) ++} ++ ++// General attributes used in messaging systems. ++const ( ++ // MessagingSystemKey is the attribute Key conforming to the ++ // "messaging.system" semantic conventions. It represents a string ++ // identifying the messaging system. ++ // ++ // Type: string ++ // RequirementLevel: Required ++ // Stability: stable ++ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' ++ MessagingSystemKey = attribute.Key("messaging.system") ++ ++ // MessagingOperationKey is the attribute Key conforming to the ++ // "messaging.operation" semantic conventions. It represents a string ++ // identifying the kind of messaging operation as defined in the [Operation ++ // names](#operation-names) section above. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ // Note: If a custom value is used, it MUST be of low cardinality. ++ MessagingOperationKey = attribute.Key("messaging.operation") ++ ++ // MessagingBatchMessageCountKey is the attribute Key conforming to the ++ // "messaging.batch.message_count" semantic conventions. It represents the ++ // number of messages sent, received, or processed in the scope of the ++ // batching operation. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If the span describes an ++ // operation on a batch of messages.) ++ // Stability: stable ++ // Examples: 0, 1, 2 ++ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on ++ // spans that operate with a single message. When a messaging client ++ // library supports both batch and single-message API for the same ++ // operation, instrumentations SHOULD use `messaging.batch.message_count` ++ // for batching APIs and SHOULD NOT use it for single-message APIs. ++ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ++ ++ // MessagingClientIDKey is the attribute Key conforming to the ++ // "messaging.client_id" semantic conventions. It represents a unique ++ // identifier for the client that consumes or produces a message. 
++ // ++ // Type: string ++ // RequirementLevel: Recommended (If a client id is available) ++ // Stability: stable ++ // Examples: 'client-5', 'myhost@8742@s8083jm' ++ MessagingClientIDKey = attribute.Key("messaging.client_id") ++) ++ ++var ( ++ // publish ++ MessagingOperationPublish = MessagingOperationKey.String("publish") ++ // receive ++ MessagingOperationReceive = MessagingOperationKey.String("receive") ++ // process ++ MessagingOperationProcess = MessagingOperationKey.String("process") ++) ++ ++// MessagingSystem returns an attribute KeyValue conforming to the ++// "messaging.system" semantic conventions. It represents a string identifying ++// the messaging system. ++func MessagingSystem(val string) attribute.KeyValue { ++ return MessagingSystemKey.String(val) ++} ++ ++// MessagingBatchMessageCount returns an attribute KeyValue conforming to ++// the "messaging.batch.message_count" semantic conventions. It represents the ++// number of messages sent, received, or processed in the scope of the batching ++// operation. ++func MessagingBatchMessageCount(val int) attribute.KeyValue { ++ return MessagingBatchMessageCountKey.Int(val) ++} ++ ++// MessagingClientID returns an attribute KeyValue conforming to the ++// "messaging.client_id" semantic conventions. It represents a unique ++// identifier for the client that consumes or produces a message. ++func MessagingClientID(val string) attribute.KeyValue { ++ return MessagingClientIDKey.String(val) ++} ++ ++// Semantic conventions for remote procedure calls. ++const ( ++ // RPCSystemKey is the attribute Key conforming to the "rpc.system" ++ // semantic conventions. It represents a string identifying the remoting ++ // system. See below for a list of well-known identifiers. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCSystemKey = attribute.Key("rpc.system") ++ ++ // RPCServiceKey is the attribute Key conforming to the "rpc.service" ++ // semantic conventions. It represents the full (logical) name of the ++ // service being called, including its package name, if applicable. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'myservice.EchoService' ++ // Note: This is the logical name of the service from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // class. The `code.namespace` attribute may be used to store the latter ++ // (despite the attribute name, it may include a class name; e.g., class ++ // with method actually executing the call on the server side, RPC client ++ // stub class on the client side). ++ RPCServiceKey = attribute.Key("rpc.service") ++ ++ // RPCMethodKey is the attribute Key conforming to the "rpc.method" ++ // semantic conventions. It represents the name of the (logical) method ++ // being called, must be equal to the $method part in the span name. ++ // ++ // Type: string ++ // RequirementLevel: Recommended ++ // Stability: stable ++ // Examples: 'exampleMethod' ++ // Note: This is the logical name of the method from the RPC interface ++ // perspective, which can be different from the name of any implementing ++ // method/function. The `code.function` attribute may be used to store the ++ // latter (e.g., method actually executing the call on the server side, RPC ++ // client stub method on the client side). 
++ RPCMethodKey = attribute.Key("rpc.method") ++) ++ ++var ( ++ // gRPC ++ RPCSystemGRPC = RPCSystemKey.String("grpc") ++ // Java RMI ++ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") ++ // .NET WCF ++ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") ++ // Apache Dubbo ++ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ++ // Connect RPC ++ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") ++) ++ ++// RPCService returns an attribute KeyValue conforming to the "rpc.service" ++// semantic conventions. It represents the full (logical) name of the service ++// being called, including its package name, if applicable. ++func RPCService(val string) attribute.KeyValue { ++ return RPCServiceKey.String(val) ++} ++ ++// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" ++// semantic conventions. It represents the name of the (logical) method being ++// called, must be equal to the $method part in the span name. ++func RPCMethod(val string) attribute.KeyValue { ++ return RPCMethodKey.String(val) ++} ++ ++// Tech-specific attributes for gRPC. ++const ( ++ // RPCGRPCStatusCodeKey is the attribute Key conforming to the ++ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric ++ // status ++ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of ++ // the gRPC request. ++ // ++ // Type: Enum ++ // RequirementLevel: Required ++ // Stability: stable ++ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ++) ++ ++var ( ++ // OK ++ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) ++ // CANCELLED ++ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) ++ // UNKNOWN ++ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) ++ // INVALID_ARGUMENT ++ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) ++ // DEADLINE_EXCEEDED ++ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) ++ // NOT_FOUND ++ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) ++ // ALREADY_EXISTS ++ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) ++ // PERMISSION_DENIED ++ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) ++ // RESOURCE_EXHAUSTED ++ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) ++ // FAILED_PRECONDITION ++ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) ++ // ABORTED ++ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) ++ // OUT_OF_RANGE ++ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) ++ // UNIMPLEMENTED ++ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) ++ // INTERNAL ++ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) ++ // UNAVAILABLE ++ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) ++ // DATA_LOSS ++ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) ++ // UNAUTHENTICATED ++ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ++) ++ ++// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). ++const ( ++ // RPCJsonrpcVersionKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++ // does not specify this, the value can be omitted. 
++ // ++ // Type: string ++ // RequirementLevel: ConditionallyRequired (If other than the default ++ // version (`1.0`)) ++ // Stability: stable ++ // Examples: '2.0', '1.0' ++ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") ++ ++ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++ // property of request or response. Since protocol allows id to be int, ++ // string, `null` or missing (for notifications), value is expected to be ++ // cast to string for simplicity. Use empty string in case of `null` value. ++ // Omit entirely if this is a notification. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: '10', 'request-7', '' ++ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") ++ ++ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_code" semantic conventions. It represents the ++ // `error.code` property of response if it is an error response. ++ // ++ // Type: int ++ // RequirementLevel: ConditionallyRequired (If response is not successful.) ++ // Stability: stable ++ // Examples: -32700, 100 ++ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") ++ ++ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the ++ // "rpc.jsonrpc.error_message" semantic conventions. It represents the ++ // `error.message` property of response if it is an error response. ++ // ++ // Type: string ++ // RequirementLevel: Optional ++ // Stability: stable ++ // Examples: 'Parse error', 'User already exists' ++ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ++) ++ ++// RPCJsonrpcVersion returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.version" semantic conventions. It represents the protocol ++// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 ++// does not specify this, the value can be omitted. ++func RPCJsonrpcVersion(val string) attribute.KeyValue { ++ return RPCJsonrpcVersionKey.String(val) ++} ++ ++// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` ++// property of request or response. Since protocol allows id to be int, string, ++// `null` or missing (for notifications), value is expected to be cast to ++// string for simplicity. Use empty string in case of `null` value. Omit ++// entirely if this is a notification. ++func RPCJsonrpcRequestID(val string) attribute.KeyValue { ++ return RPCJsonrpcRequestIDKey.String(val) ++} ++ ++// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_code" semantic conventions. It represents the ++// `error.code` property of response if it is an error response. ++func RPCJsonrpcErrorCode(val int) attribute.KeyValue { ++ return RPCJsonrpcErrorCodeKey.Int(val) ++} ++ ++// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the ++// "rpc.jsonrpc.error_message" semantic conventions. It represents the ++// `error.message` property of response if it is an error response. ++func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { ++ return RPCJsonrpcErrorMessageKey.String(val) ++} ++ ++// Tech-specific attributes for Connect RPC. ++const ( ++ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the ++ // "rpc.connect_rpc.error_code" semantic conventions. 
It represents the ++ // [error codes](https://connect.build/docs/protocol/#error-codes) of the ++ // Connect request. Error codes are always string values. ++ // ++ // Type: Enum ++ // RequirementLevel: ConditionallyRequired (If response is not successful ++ // and if error code available.) ++ // Stability: stable ++ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") ++) ++ ++var ( ++ // cancelled ++ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") ++ // unknown ++ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") ++ // invalid_argument ++ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") ++ // deadline_exceeded ++ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") ++ // not_found ++ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") ++ // already_exists ++ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") ++ // permission_denied ++ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") ++ // resource_exhausted ++ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") ++ // failed_precondition ++ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") ++ // aborted ++ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") ++ // out_of_range ++ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") ++ // unimplemented ++ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") ++ // internal ++ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") ++ // unavailable ++ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") ++ // data_loss ++ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") ++ // unauthenticated ++ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") ++) +diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go +index f058cc781e0..3aadc66cf7a 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/config.go ++++ b/vendor/go.opentelemetry.io/otel/trace/config.go +@@ -25,6 +25,7 @@ type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string ++ attrs attribute.Set + } + + // InstrumentationVersion returns the version of the library providing instrumentation. +@@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion + } + ++// InstrumentationAttributes returns the attributes associated with the library ++// providing instrumentation. ++func (t *TracerConfig) InstrumentationAttributes() attribute.Set { ++ return t.attrs ++} ++ + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. 
+ func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +@@ -261,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c + } ++ + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +@@ -307,6 +315,16 @@ func WithInstrumentationVersion(version string) TracerOption { + }) + } + ++// WithInstrumentationAttributes sets the instrumentation attributes. ++// ++// The passed attributes will be de-duplicated. ++func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { ++ return tracerOptionFunc(func(config TracerConfig) TracerConfig { ++ config.attrs = attribute.NewSet(attr...) ++ return config ++ }) ++} ++ + // WithSchemaURL sets the schema URL for the Tracer. + func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { +diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go +index 391417718f5..440f3d7565a 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/doc.go ++++ b/vendor/go.opentelemetry.io/otel/trace/doc.go +@@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the + OpenTelemetry API. + + To participate in distributed traces a Span needs to be created for the +-operation being performed as part of a traced workflow. It its simplest form: ++operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + +@@ -62,5 +62,69 @@ a default. + defer span.End() + // ... + } ++ ++# API Implementations ++ ++This package does not conform to the standard Go versioning policy; all of its ++interfaces may have methods added to them without a package major version bump. ++This non-standard API evolution could surprise an uninformed implementation ++author. They could unknowingly build their implementation in a way that would ++result in a runtime panic for their users that update to the new API. ++ ++The API is designed to help inform an instrumentation author about this ++non-standard API evolution. It requires them to choose a default behavior for ++unimplemented interface methods. There are three behavior choices they can ++make: ++ ++ - Compilation failure ++ - Panic ++ - Default to another implementation ++ ++All interfaces in this API embed a corresponding interface from ++[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default ++behavior of their implementations to be a compilation failure, signaling to ++their users they need to update to the latest version of that implementation, ++they need to embed the corresponding interface from ++[go.opentelemetry.io/otel/trace/embedded] in their implementation. For ++example, ++ ++ import "go.opentelemetry.io/otel/trace/embedded" ++ ++ type TracerProvider struct { ++ embedded.TracerProvider ++ // ... ++ } ++ ++If an author wants the default behavior of their implementations to panic, they ++can embed the API interface directly. ++ ++ import "go.opentelemetry.io/otel/trace" ++ ++ type TracerProvider struct { ++ trace.TracerProvider ++ // ... ++ } ++ ++This option is not recommended. It will lead to publishing packages that ++contain runtime panics when users update to newer versions of ++[go.opentelemetry.io/otel/trace], which may be done with a trasitive ++dependency. ++ ++Finally, an author can embed another implementation in theirs. The embedded ++implementation will be used for methods not defined by the author. 
For example, ++an author who wants to default to silently dropping the call can use ++[go.opentelemetry.io/otel/trace/noop]: ++ ++ import "go.opentelemetry.io/otel/trace/noop" ++ ++ type TracerProvider struct { ++ noop.TracerProvider ++ // ... ++ } ++ ++It is strongly recommended that authors only embed ++[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. ++That implementation is the only one OpenTelemetry authors can guarantee will ++fully implement all the API interfaces when a user updates their API. + */ + package trace // import "go.opentelemetry.io/otel/trace" +diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +new file mode 100644 +index 00000000000..898db5a7546 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +@@ -0,0 +1,56 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package embedded provides interfaces embedded within the [OpenTelemetry ++// trace API]. ++// ++// Implementers of the [OpenTelemetry trace API] can embed the relevant type ++// from this package into their implementation directly. Doing so will result ++// in a compilation error for users when the [OpenTelemetry trace API] is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++// ++// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace ++package embedded // import "go.opentelemetry.io/otel/trace/embedded" ++ ++// TracerProvider is embedded in ++// [go.opentelemetry.io/otel/trace.TracerProvider]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to ++// experience a compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] ++// interface is extended (which is something that can happen without a major ++// version bump of the API package). ++type TracerProvider interface{ tracerProvider() } ++ ++// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. ++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface ++// is extended (which is something that can happen without a major version bump ++// of the API package). ++type Tracer interface{ tracer() } ++ ++// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. 
++// ++// Embed this interface in your implementation of the ++// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a ++// compilation error, signaling they need to update to your latest ++// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is ++// extended (which is something that can happen without a major version bump of ++// the API package). ++type Span interface{ span() } +diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go +index 73950f20778..c125491caeb 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/noop.go ++++ b/vendor/go.opentelemetry.io/otel/trace/noop.go +@@ -19,16 +19,20 @@ import ( + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + // NewNoopTracerProvider returns an implementation of TracerProvider that + // performs no operations. The Tracer and Spans created from the returned + // TracerProvider also perform no operations. ++// ++// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] ++// instead. + func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} + } + +-type noopTracerProvider struct{} ++type noopTracerProvider struct{ embedded.TracerProvider } + + var _ TracerProvider = noopTracerProvider{} + +@@ -37,8 +41,8 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} + } + +-// noopTracer is an implementation of Tracer that preforms no operations. +-type noopTracer struct{} ++// noopTracer is an implementation of Tracer that performs no operations. ++type noopTracer struct{ embedded.Tracer } + + var _ Tracer = noopTracer{} + +@@ -53,8 +57,8 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption + return ContextWithSpan(ctx, span), span + } + +-// noopSpan is an implementation of Span that preforms no operations. +-type noopSpan struct{} ++// noopSpan is an implementation of Span that performs no operations. ++type noopSpan struct{ embedded.Span } + + var _ Span = noopSpan{} + +diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +new file mode 100644 +index 00000000000..7f485543c47 +--- /dev/null ++++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +@@ -0,0 +1,118 @@ ++// Copyright The OpenTelemetry Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package noop provides an implementation of the OpenTelemetry trace API that ++// produces no telemetry and minimizes used computation resources. ++// ++// Using this package to implement the OpenTelemetry trace API will effectively ++// disable OpenTelemetry. ++// ++// This implementation can be embedded in other implementations of the ++// OpenTelemetry trace API. Doing so will mean the implementation defaults to ++// no operation for methods it does not implement. 
++package noop // import "go.opentelemetry.io/otel/trace/noop" ++ ++import ( ++ "context" ++ ++ "go.opentelemetry.io/otel/attribute" ++ "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace" ++ "go.opentelemetry.io/otel/trace/embedded" ++) ++ ++var ( ++ // Compile-time check this implements the OpenTelemetry API. ++ ++ _ trace.TracerProvider = TracerProvider{} ++ _ trace.Tracer = Tracer{} ++ _ trace.Span = Span{} ++) ++ ++// TracerProvider is an OpenTelemetry No-Op TracerProvider. ++type TracerProvider struct{ embedded.TracerProvider } ++ ++// NewTracerProvider returns a TracerProvider that does not record any telemetry. ++func NewTracerProvider() TracerProvider { ++ return TracerProvider{} ++} ++ ++// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. ++func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { ++ return Tracer{} ++} ++ ++// Tracer is an OpenTelemetry No-Op Tracer. ++type Tracer struct{ embedded.Tracer } ++ ++// Start creates a span. The created span will be set in a child context of ctx ++// and returned with the span. ++// ++// If ctx contains a span context, the returned span will also contain that ++// span context. If the span context in ctx is for a non-recording span, that ++// span instance will be returned directly. ++func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { ++ span := trace.SpanFromContext(ctx) ++ ++ // If the parent context contains a non-zero span context, that span ++ // context needs to be returned as a non-recording span ++ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). ++ var zeroSC trace.SpanContext ++ if sc := span.SpanContext(); !sc.Equal(zeroSC) { ++ if !span.IsRecording() { ++ // If the span is not recording return it directly. ++ return ctx, span ++ } ++ // Otherwise, return the span context needs in a non-recording span. ++ span = Span{sc: sc} ++ } else { ++ // No parent, return a No-Op span with an empty span context. ++ span = Span{} ++ } ++ return trace.ContextWithSpan(ctx, span), span ++} ++ ++// Span is an OpenTelemetry No-Op Span. ++type Span struct { ++ embedded.Span ++ ++ sc trace.SpanContext ++} ++ ++// SpanContext returns an empty span context. ++func (s Span) SpanContext() trace.SpanContext { return s.sc } ++ ++// IsRecording always returns false. ++func (Span) IsRecording() bool { return false } ++ ++// SetStatus does nothing. ++func (Span) SetStatus(codes.Code, string) {} ++ ++// SetAttributes does nothing. ++func (Span) SetAttributes(...attribute.KeyValue) {} ++ ++// End does nothing. ++func (Span) End(...trace.SpanEndOption) {} ++ ++// RecordError does nothing. ++func (Span) RecordError(error, ...trace.EventOption) {} ++ ++// AddEvent does nothing. ++func (Span) AddEvent(string, ...trace.EventOption) {} ++ ++// SetName does nothing. ++func (Span) SetName(string) {} ++ ++// TracerProvider returns a No-Op TracerProvider. 
++func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } +diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go +index 97f3d83855b..26a4b2260ec 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/trace.go ++++ b/vendor/go.opentelemetry.io/otel/trace/trace.go +@@ -22,6 +22,7 @@ import ( + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ++ "go.opentelemetry.io/otel/trace/embedded" + ) + + const ( +@@ -48,8 +49,10 @@ func (e errorConst) Error() string { + // nolint:revive // revive complains about stutter of `trace.TraceID`. + type TraceID [16]byte + +-var nilTraceID TraceID +-var _ json.Marshaler = nilTraceID ++var ( ++ nilTraceID TraceID ++ _ json.Marshaler = nilTraceID ++) + + // IsValid checks whether the trace TraceID is valid. A valid trace ID does + // not consist of zeros only. +@@ -71,8 +74,10 @@ func (t TraceID) String() string { + // SpanID is a unique identity of a span in a trace. + type SpanID [8]byte + +-var nilSpanID SpanID +-var _ json.Marshaler = nilSpanID ++var ( ++ nilSpanID SpanID ++ _ json.Marshaler = nilSpanID ++) + + // IsValid checks whether the SpanID is valid. A valid SpanID does not consist + // of zeros only. +@@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { + // create a Span and it is then up to the operation the Span represents to + // properly end the Span when the operation itself ends. + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Span interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Span ++ + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this +@@ -364,8 +376,9 @@ type Span interface { + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a +- // description, overriding previous values set. The description is only +- // included in a status when the code is for an error. ++ // description, provided the status hasn't already been set to a higher ++ // value before (OK > Error > Unset). The description is only included in a ++ // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. +@@ -485,8 +498,15 @@ func (sk SpanKind) String() string { + + // Tracer is the creator of Spans. + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type Tracer interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.Tracer ++ + // Start creates a span and a context.Context containing the newly-created span. 
+ // + // If the context.Context provided in `ctx` contains a Span then the newly-created +@@ -517,8 +537,15 @@ type Tracer interface { + // at runtime from its users or it can simply use the globally registered one + // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). + // +-// Warning: methods may be added to this interface in minor releases. ++// Warning: Methods may be added to this interface in minor releases. See ++// package documentation on API implementation for information on how to set ++// default behavior for unimplemented methods. + type TracerProvider interface { ++ // Users of the interface can ignore this. This embedded type is only used ++ // by implementations of this interface. See the "API Implementations" ++ // section of the package documentation for more information. ++ embedded.TracerProvider ++ + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. +diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go +index ca68a82e5f7..d1e47ca2faa 100644 +--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go ++++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go +@@ -28,9 +28,9 @@ const ( + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header +- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` +- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` +- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` ++ noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` ++ withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` ++ valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" +@@ -40,9 +40,10 @@ const ( + ) + + var ( +- keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) +- valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) +- memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ++ noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) ++ withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) ++ valueRe = regexp.MustCompile(`^` + valueFormat + `$`) ++ memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + ) + + type member struct { +@@ -51,10 +52,19 @@ type member struct { + } + + func newMember(key, value string) (member, error) { +- if !keyRe.MatchString(key) { ++ if len(key) > 256 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } +- if !valueRe.MatchString(value) { ++ if !noTenantKeyRe.MatchString(key) { ++ if !withTenantKeyRe.MatchString(key) { ++ return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) ++ } ++ atIndex := strings.LastIndex(key, "@") ++ if atIndex > 241 || len(key)-1-atIndex > 14 { ++ return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) ++ } ++ } ++ if len(value) > 256 || !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +@@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { + + func parseMember(m 
string) (member, error) { + matches := memberRe.FindStringSubmatch(m) +- if len(matches) != 5 { ++ if len(matches) != 3 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } +- +- return member{ +- Key: matches[1], +- Value: matches[4], +- }, nil ++ result, e := newMember(matches[1], matches[2]) ++ if e != nil { ++ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) ++ } ++ return result, nil + } + + // String encodes member into a string compliant with the W3C Trace Context +diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go +index 806db41c555..5a92f1d4b6c 100644 +--- a/vendor/go.opentelemetry.io/otel/version.go ++++ b/vendor/go.opentelemetry.io/otel/version.go +@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" + + // Version is the current release version of OpenTelemetry in use. + func Version() string { +- return "1.10.0" ++ return "1.20.0" + } +diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml +index ec2ca16d270..82366e79981 100644 +--- a/vendor/go.opentelemetry.io/otel/versions.yaml ++++ b/vendor/go.opentelemetry.io/otel/versions.yaml +@@ -14,45 +14,42 @@ + + module-sets: + stable-v1: +- version: v1.10.0 ++ version: v1.20.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing ++ - go.opentelemetry.io/otel/bridge/opentracing/test ++ - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/fib +- - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin +- - go.opentelemetry.io/otel/exporters/jaeger +- - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp +- - go.opentelemetry.io/otel/exporters/otlp/internal/retry + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace +- - go.opentelemetry.io/otel/trace ++ - go.opentelemetry.io/otel/exporters/zipkin ++ - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk ++ - go.opentelemetry.io/otel/sdk/metric ++ - go.opentelemetry.io/otel/trace + experimental-metrics: +- version: v0.31.0 ++ version: v0.43.0 + modules: ++ - go.opentelemetry.io/otel/bridge/opencensus ++ - go.opentelemetry.io/otel/bridge/opencensus/test ++ - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus ++ - go.opentelemetry.io/otel/example/view + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric +- - go.opentelemetry.io/otel/metric +- - go.opentelemetry.io/otel/sdk/metric + experimental-schema: +- version: v0.0.3 ++ version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +- bridge: +- version: v0.31.0 +- modules: +- - go.opentelemetry.io/otel/bridge/opencensus +- - go.opentelemetry.io/otel/bridge/opencensus/test +- - go.opentelemetry.io/otel/example/opencensus + excluded-modules: + - go.opentelemetry.io/otel/internal/tools +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go 
b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +index fc285c089e7..c1af04e84e5 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/collector/trace/v1/trace_service.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +index d142c2a447d..bb1bd261ed8 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +@@ -77,20 +77,22 @@ func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) +- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) ++ var err error ++ var annotatedContext context.Context ++ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } +- resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) ++ resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) +- ctx = runtime.NewServerMetadataContext(ctx, md) ++ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { +- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) ++ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + +- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) ++ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + +@@ -139,19 +141,21 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) +- rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) ++ var err error ++ var annotatedContext context.Context ++ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } +- resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) +- ctx = runtime.NewServerMetadataContext(ctx, md) ++ resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) ++ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { +- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) ++ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + +- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) ++ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + +@@ -159,7 +163,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu + } + + var ( +- pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) ++ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) + ) + + var ( +diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +index c21f2cb47cf..dd1b73f1e99 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +@@ -1,7 +1,7 @@ + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. + // versions: + // - protoc-gen-go-grpc v1.1.0 +-// - protoc v3.17.3 ++// - protoc v3.21.6 + // source: opentelemetry/proto/collector/trace/v1/trace_service.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +index 8502e607b25..852209b097b 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/common/v1/common.proto + + package v1 +@@ -361,8 +361,11 @@ type InstrumentationScope struct { + unknownFields protoimpl.UnknownFields + + // An empty instrumentation scope name means the name is unknown. 
+- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` ++ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` ++ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` ++ // Additional attributes that describe the scope. [Optional]. ++ // Attribute keys MUST be unique (it is not allowed to have more than one ++ // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + } +diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +index bcc1060e3dd..b7545b03b9f 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/resource/v1/resource.proto + + package v1 +diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +index 499a43d77bb..51a499816a6 100644 +--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go ++++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.17.3 ++// protoc v3.21.6 + // source: opentelemetry/proto/trace/v1/trace.proto + + package v1 +@@ -117,8 +117,8 @@ type Status_StatusCode int32 + const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 +- // The Span has been validated by an Application developers or Operator to have +- // completed successfully. ++ // The Span has been validated by an Application developer or Operator to ++ // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +@@ -374,20 +374,16 @@ type Span struct { + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share +- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes +- // is considered invalid. +- // +- // This field is semantically required. Receiver should generate new +- // random trace_id if empty or invalid trace_id was received. ++ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR ++ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON ++ // is zero-length and thus is also invalid). + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span +- // is created. The ID is an 8-byte array. An ID with all zeroes is considered +- // invalid. +- // +- // This field is semantically required. Receiver should generate new +- // random span_id if empty or invalid span_id was received. ++ // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length ++ // other than 8 bytes is considered invalid (empty string in OTLP/JSON ++ // is zero-length and thus is also invalid). + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` +@@ -433,8 +429,8 @@ type Span struct { + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 +- // "abc.com/myattribute": true +- // "abc.com/score": 10.239 ++ // "example.com/myattribute": true ++ // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +new file mode 100644 +index 00000000000..93da7322bc4 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +@@ -0,0 +1,98 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its ++// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and ++// draft-irtf-cfrg-xchacha-01. ++package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" ++ ++import ( ++ "crypto/cipher" ++ "errors" ++) ++ ++const ( ++ // KeySize is the size of the key used by this AEAD, in bytes. ++ KeySize = 32 ++ ++ // NonceSize is the size of the nonce used with the standard variant of this ++ // AEAD, in bytes. ++ // ++ // Note that this is too short to be safely generated at random if the same ++ // key is reused more than 2³² times. ++ NonceSize = 12 ++ ++ // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 ++ // variant of this AEAD, in bytes. ++ NonceSizeX = 24 ++ ++ // Overhead is the size of the Poly1305 authentication tag, and the ++ // difference between a ciphertext length and its plaintext. ++ Overhead = 16 ++) ++ ++type chacha20poly1305 struct { ++ key [KeySize]byte ++} ++ ++// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
++func New(key []byte) (cipher.AEAD, error) { ++ if len(key) != KeySize { ++ return nil, errors.New("chacha20poly1305: bad key length") ++ } ++ ret := new(chacha20poly1305) ++ copy(ret.key[:], key) ++ return ret, nil ++} ++ ++func (c *chacha20poly1305) NonceSize() int { ++ return NonceSize ++} ++ ++func (c *chacha20poly1305) Overhead() int { ++ return Overhead ++} ++ ++func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if len(nonce) != NonceSize { ++ panic("chacha20poly1305: bad nonce length passed to Seal") ++ } ++ ++ if uint64(len(plaintext)) > (1<<38)-64 { ++ panic("chacha20poly1305: plaintext too large") ++ } ++ ++ return c.seal(dst, nonce, plaintext, additionalData) ++} ++ ++var errOpen = errors.New("chacha20poly1305: message authentication failed") ++ ++func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if len(nonce) != NonceSize { ++ panic("chacha20poly1305: bad nonce length passed to Open") ++ } ++ if len(ciphertext) < 16 { ++ return nil, errOpen ++ } ++ if uint64(len(ciphertext)) > (1<<38)-48 { ++ panic("chacha20poly1305: ciphertext too large") ++ } ++ ++ return c.open(dst, nonce, ciphertext, additionalData) ++} ++ ++// sliceForAppend takes a slice and a requested number of bytes. It returns a ++// slice with the contents of the given slice followed by that many bytes and a ++// second slice that aliases into it and contains only the extra bytes. If the ++// original slice has sufficient capacity then no allocation is performed. ++func sliceForAppend(in []byte, n int) (head, tail []byte) { ++ if total := len(in) + n; cap(in) >= total { ++ head = in[:total] ++ } else { ++ head = make([]byte, total) ++ copy(head, in) ++ } ++ tail = head[len(in):] ++ return ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +new file mode 100644 +index 00000000000..50695a14f62 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +@@ -0,0 +1,86 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build gc && !purego ++ ++package chacha20poly1305 ++ ++import ( ++ "encoding/binary" ++ ++ "golang.org/x/crypto/internal/alias" ++ "golang.org/x/sys/cpu" ++) ++ ++//go:noescape ++func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool ++ ++//go:noescape ++func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) ++ ++var ( ++ useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 ++) ++ ++// setupState writes a ChaCha20 input matrix to state. See ++// https://tools.ietf.org/html/rfc7539#section-2.3. 
++func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { ++ state[0] = 0x61707865 ++ state[1] = 0x3320646e ++ state[2] = 0x79622d32 ++ state[3] = 0x6b206574 ++ ++ state[4] = binary.LittleEndian.Uint32(key[0:4]) ++ state[5] = binary.LittleEndian.Uint32(key[4:8]) ++ state[6] = binary.LittleEndian.Uint32(key[8:12]) ++ state[7] = binary.LittleEndian.Uint32(key[12:16]) ++ state[8] = binary.LittleEndian.Uint32(key[16:20]) ++ state[9] = binary.LittleEndian.Uint32(key[20:24]) ++ state[10] = binary.LittleEndian.Uint32(key[24:28]) ++ state[11] = binary.LittleEndian.Uint32(key[28:32]) ++ ++ state[12] = 0 ++ state[13] = binary.LittleEndian.Uint32(nonce[0:4]) ++ state[14] = binary.LittleEndian.Uint32(nonce[4:8]) ++ state[15] = binary.LittleEndian.Uint32(nonce[8:12]) ++} ++ ++func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if !cpu.X86.HasSSSE3 { ++ return c.sealGeneric(dst, nonce, plaintext, additionalData) ++ } ++ ++ var state [16]uint32 ++ setupState(&state, &c.key, nonce) ++ ++ ret, out := sliceForAppend(dst, len(plaintext)+16) ++ if alias.InexactOverlap(out, plaintext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) ++ return ret ++} ++ ++func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if !cpu.X86.HasSSSE3 { ++ return c.openGeneric(dst, nonce, ciphertext, additionalData) ++ } ++ ++ var state [16]uint32 ++ setupState(&state, &c.key, nonce) ++ ++ ciphertext = ciphertext[:len(ciphertext)-16] ++ ret, out := sliceForAppend(dst, len(ciphertext)) ++ if alias.InexactOverlap(out, ciphertext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { ++ for i := range out { ++ out[i] = 0 ++ } ++ return nil, errOpen ++ } ++ ++ return ret, nil ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +new file mode 100644 +index 00000000000..731d2ac6dbc +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +@@ -0,0 +1,2715 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
++ ++//go:build gc && !purego ++ ++#include "textflag.h" ++// General register allocation ++#define oup DI ++#define inp SI ++#define inl BX ++#define adp CX // free to reuse, after we hash the additional data ++#define keyp R8 // free to reuse, when we copy the key to stack ++#define itr2 R9 // general iterator ++#define itr1 CX // general iterator ++#define acc0 R10 ++#define acc1 R11 ++#define acc2 R12 ++#define t0 R13 ++#define t1 R14 ++#define t2 R15 ++#define t3 R8 ++// Register and stack allocation for the SSE code ++#define rStore (0*16)(BP) ++#define sStore (1*16)(BP) ++#define state1Store (2*16)(BP) ++#define state2Store (3*16)(BP) ++#define tmpStore (4*16)(BP) ++#define ctr0Store (5*16)(BP) ++#define ctr1Store (6*16)(BP) ++#define ctr2Store (7*16)(BP) ++#define ctr3Store (8*16)(BP) ++#define A0 X0 ++#define A1 X1 ++#define A2 X2 ++#define B0 X3 ++#define B1 X4 ++#define B2 X5 ++#define C0 X6 ++#define C1 X7 ++#define C2 X8 ++#define D0 X9 ++#define D1 X10 ++#define D2 X11 ++#define T0 X12 ++#define T1 X13 ++#define T2 X14 ++#define T3 X15 ++#define A3 T0 ++#define B3 T1 ++#define C3 T2 ++#define D3 T3 ++// Register and stack allocation for the AVX2 code ++#define rsStoreAVX2 (0*32)(BP) ++#define state1StoreAVX2 (1*32)(BP) ++#define state2StoreAVX2 (2*32)(BP) ++#define ctr0StoreAVX2 (3*32)(BP) ++#define ctr1StoreAVX2 (4*32)(BP) ++#define ctr2StoreAVX2 (5*32)(BP) ++#define ctr3StoreAVX2 (6*32)(BP) ++#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack ++#define AA0 Y0 ++#define AA1 Y5 ++#define AA2 Y6 ++#define AA3 Y7 ++#define BB0 Y14 ++#define BB1 Y9 ++#define BB2 Y10 ++#define BB3 Y11 ++#define CC0 Y12 ++#define CC1 Y13 ++#define CC2 Y8 ++#define CC3 Y15 ++#define DD0 Y4 ++#define DD1 Y1 ++#define DD2 Y2 ++#define DD3 Y3 ++#define TT0 DD3 ++#define TT1 AA3 ++#define TT2 BB3 ++#define TT3 CC3 ++// ChaCha20 constants ++DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 ++DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e ++DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 ++DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 ++DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 ++DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e ++DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 ++DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 ++// <<< 16 with PSHUFB ++DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 ++DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A ++DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 ++DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A ++// <<< 8 with PSHUFB ++DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 ++DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B ++DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 ++DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B ++ ++DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 ++DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 ++DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 ++DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 ++ ++DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 ++DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 ++DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 ++DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 ++// Poly1305 key clamp ++DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF ++DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC ++DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF ++DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF ++ ++DATA ·sseIncMask<>+0x00(SB)/8, $0x1 ++DATA ·sseIncMask<>+0x08(SB)/8, $0x0 ++// To load/store the last < 16 bytes in a buffer ++DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff ++DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff 
++DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff ++DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff ++DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff ++DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff ++DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff ++DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 ++DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff ++DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff ++DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff ++DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff ++DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff ++DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff ++DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff ++DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff ++ ++GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 ++GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 ++GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 ++// No PALIGNR in Go ASM yet (but VPALIGNR is present). 
++#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 ++#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 ++#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 ++#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 ++#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 ++#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 ++#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 ++#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 ++#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 ++#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 ++#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 ++#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 ++#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 ++#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 ++#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 ++#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 ++#define shiftC0Right shiftC0Left ++#define shiftC1Right shiftC1Left ++#define shiftC2Right shiftC2Left ++#define shiftC3Right shiftC3Left ++#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 ++#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 ++#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 ++#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 ++ ++// Some macros ++ ++// ROL rotates the uint32s in register R left by N bits, using temporary T. ++#define ROL(N, R, T) \ ++ MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R ++ ++// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. ++#ifdef GOAMD64_v2 ++#define ROL16(R, T) PSHUFB ·rol16<>(SB), R ++#else ++#define ROL16(R, T) ROL(16, R, T) ++#endif ++ ++// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. 
++#ifdef GOAMD64_v2 ++#define ROL8(R, T) PSHUFB ·rol8<>(SB), R ++#else ++#define ROL8(R, T) ROL(8, R, T) ++#endif ++ ++#define chachaQR(A, B, C, D, T) \ ++ PADDD B, A; PXOR A, D; ROL16(D, T) \ ++ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ ++ PADDD B, A; PXOR A, D; ROL8(D, T) \ ++ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B ++ ++#define chachaQR_AVX2(A, B, C, D, T) \ ++ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ ++ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ ++ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ ++ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B ++ ++#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 ++#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 ++#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX ++#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 ++#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 ++ ++#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 ++#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 ++#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 ++ ++#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage ++#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage ++// ---------------------------------------------------------------------------- ++TEXT polyHashADInternal<>(SB), NOSPLIT, $0 ++ // adp points to beginning of additional data ++ // itr2 holds ad length ++ XORQ acc0, acc0 ++ XORQ acc1, acc1 ++ XORQ acc2, acc2 ++ CMPQ itr2, $13 ++ JNE hashADLoop ++ ++openFastTLSAD: ++ // Special treatment for the TLS case of 13 bytes ++ MOVQ (adp), acc0 ++ MOVQ 5(adp), acc1 ++ SHRQ $24, acc1 ++ MOVQ $1, acc2 ++ polyMul ++ RET ++ ++hashADLoop: ++ // Hash in 16 byte chunks ++ CMPQ itr2, $16 ++ JB hashADTail ++ polyAdd(0(adp)) ++ LEAQ (1*16)(adp), adp ++ SUBQ $16, itr2 ++ polyMul ++ JMP hashADLoop ++ ++hashADTail: ++ CMPQ itr2, $0 ++ JE hashADDone ++ ++ // Hash last < 16 byte tail ++ XORQ t0, t0 ++ XORQ t1, t1 ++ XORQ t2, t2 ++ ADDQ itr2, adp ++ ++hashADTailLoop: ++ SHLQ $8, t0, t1 ++ SHLQ $8, t0 ++ MOVB -1(adp), t2 ++ XORQ t2, t0 ++ DECQ adp ++ DECQ itr2 ++ JNE hashADTailLoop ++ ++hashADTailFinish: ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Finished AD ++hashADDone: ++ RET ++ ++// ---------------------------------------------------------------------------- ++// func chacha20Poly1305Open(dst, key, src, ad []byte) bool ++TEXT ·chacha20Poly1305Open(SB), 0, $288-97 ++ // For aligned stack access ++ MOVQ SP, BP ++ ADDQ $32, BP ++ ANDQ $-32, BP ++ MOVQ dst+0(FP), oup ++ MOVQ key+24(FP), keyp ++ MOVQ src+48(FP), inp ++ MOVQ src_len+56(FP), inl ++ MOVQ ad+72(FP), adp ++ ++ // Check for AVX2 support ++ CMPB ·useAVX2(SB), $1 ++ JE chacha20Poly1305Open_AVX2 ++ ++ // Special optimization, for very short buffers ++ 
CMPQ inl, $128 ++ JBE openSSE128 // About 16% faster ++ ++ // For long buffers, prepare the poly key first ++ MOVOU ·chacha20Constants<>(SB), A0 ++ MOVOU (1*16)(keyp), B0 ++ MOVOU (2*16)(keyp), C0 ++ MOVOU (3*16)(keyp), D0 ++ MOVO D0, T1 ++ ++ // Store state on stack for future use ++ MOVO B0, state1Store ++ MOVO C0, state2Store ++ MOVO D0, ctr3Store ++ MOVQ $10, itr2 ++ ++openSSEPreparePolyKey: ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ DECQ itr2 ++ JNE openSSEPreparePolyKey ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVO A0, rStore; MOVO B0, sStore ++ ++ // Hash AAD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++openSSEMainLoop: ++ CMPQ inl, $256 ++ JB openSSEMainLoopDone ++ ++ // Load state, increment counter blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ ++ // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 ++ MOVQ $4, itr1 ++ MOVQ inp, itr2 ++ ++openSSEInternalLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyAdd(0(itr2)) ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ LEAQ (2*8)(itr2), itr2 ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ polyMulStage3 ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr1 ++ JGE openSSEInternalLoop ++ ++ polyAdd(0(itr2)) ++ polyMul ++ LEAQ (2*8)(itr2), itr2 ++ ++ CMPQ itr1, $-6 ++ JG openSSEInternalLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ ++ // Load - xor - store ++ MOVO D3, tmpStore ++ MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) ++ MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) ++ MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) ++ MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) ++ MOVOU (5*16)(inp), D0; 
PXOR D0, B1; MOVOU B1, (5*16)(oup) ++ MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) ++ MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) ++ MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) ++ MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) ++ MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) ++ MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) ++ MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) ++ MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) ++ MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) ++ LEAQ 256(inp), inp ++ LEAQ 256(oup), oup ++ SUBQ $256, inl ++ JMP openSSEMainLoop ++ ++openSSEMainLoopDone: ++ // Handle the various tail sizes efficiently ++ TESTQ inl, inl ++ JE openSSEFinalize ++ CMPQ inl, $64 ++ JBE openSSETail64 ++ CMPQ inl, $128 ++ JBE openSSETail128 ++ CMPQ inl, $192 ++ JBE openSSETail192 ++ JMP openSSETail256 ++ ++openSSEFinalize: ++ // Hash in the PT, AAD lengths ++ ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Final reduce ++ MOVQ acc0, t0 ++ MOVQ acc1, t1 ++ MOVQ acc2, t2 ++ SUBQ $-5, acc0 ++ SBBQ $-1, acc1 ++ SBBQ $3, acc2 ++ CMOVQCS t0, acc0 ++ CMOVQCS t1, acc1 ++ CMOVQCS t2, acc2 ++ ++ // Add in the "s" part of the key ++ ADDQ 0+sStore, acc0 ++ ADCQ 8+sStore, acc1 ++ ++ // Finally, constant time compare to the tag at the end of the message ++ XORQ AX, AX ++ MOVQ $1, DX ++ XORQ (0*8)(inp), acc0 ++ XORQ (1*8)(inp), acc1 ++ ORQ acc1, acc0 ++ CMOVQEQ DX, AX ++ ++ // Return true iff tags are equal ++ MOVB AX, ret+96(FP) ++ RET ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 129 bytes ++openSSE128: ++ // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks ++ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 ++ MOVQ $10, itr2 ++ ++openSSE128InnerCipherLoop: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftB1Left; shiftB2Left ++ shiftC0Left; shiftC1Left; shiftC2Left ++ shiftD0Left; shiftD1Left; shiftD2Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftB1Right; shiftB2Right ++ shiftC0Right; shiftC1Right; shiftC2Right ++ shiftD0Right; shiftD1Right; shiftD2Right ++ DECQ itr2 ++ JNE openSSE128InnerCipherLoop ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 ++ PADDL T2, C1; PADDL T2, C2 ++ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVOU A0, rStore; MOVOU B0, sStore ++ ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++openSSE128Open: ++ CMPQ inl, $16 ++ JB openSSETail16 ++ SUBQ $16, inl ++ ++ // Load for hashing ++ polyAdd(0(inp)) ++ ++ // Load for decryption ++ MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ polyMul ++ ++ // 
Shift the stream "left" ++ MOVO B1, A1 ++ MOVO C1, B1 ++ MOVO D1, C1 ++ MOVO A2, D1 ++ MOVO B2, A2 ++ MOVO C2, B2 ++ MOVO D2, C2 ++ JMP openSSE128Open ++ ++openSSETail16: ++ TESTQ inl, inl ++ JE openSSEFinalize ++ ++ // We can safely load the CT from the end, because it is padded with the MAC ++ MOVQ inl, itr2 ++ SHLQ $4, itr2 ++ LEAQ ·andMask<>(SB), t0 ++ MOVOU (inp), T0 ++ ADDQ inl, inp ++ PAND -16(t0)(itr2*1), T0 ++ MOVO T0, 0+tmpStore ++ MOVQ T0, t0 ++ MOVQ 8+tmpStore, t1 ++ PXOR A1, T0 ++ ++ // We can only store one byte at a time, since plaintext can be shorter than 16 bytes ++openSSETail16Store: ++ MOVQ T0, t3 ++ MOVB t3, (oup) ++ PSRLDQ $1, T0 ++ INCQ oup ++ DECQ inl ++ JNE openSSETail16Store ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ JMP openSSEFinalize ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 64 bytes of ciphertext ++openSSETail64: ++ // Need to decrypt up to 64 bytes - prepare single block ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ CMPQ itr1, $16 ++ JB openSSETail64LoopB ++ ++openSSETail64LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ SUBQ $16, itr1 ++ ++openSSETail64LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ chachaQR(A0, B0, C0, D0, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ ++ CMPQ itr1, $16 ++ JAE openSSETail64LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSETail64LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 ++ ++openSSETail64DecLoop: ++ CMPQ inl, $16 ++ JB openSSETail64DecLoopDone ++ SUBQ $16, inl ++ MOVOU (inp), T0 ++ PXOR T0, A0 ++ MOVOU A0, (oup) ++ LEAQ 16(inp), inp ++ LEAQ 16(oup), oup ++ MOVO B0, A0 ++ MOVO C0, B0 ++ MOVO D0, C0 ++ JMP openSSETail64DecLoop ++ ++openSSETail64DecLoopDone: ++ MOVO A0, A1 ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++openSSETail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store ++ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ ++openSSETail128LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ++openSSETail128LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ ++ CMPQ itr2, itr1 ++ JB openSSETail128LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSETail128LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B0; PADDL state1Store, B1 ++ PADDL state2Store, C0; PADDL state2Store, C1 ++ PADDL ctr1Store, D0; PADDL ctr0Store, D1 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR 
T2, C1; PXOR T3, D1 ++ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) ++ ++ SUBQ $64, inl ++ LEAQ 64(inp), inp ++ LEAQ 64(oup), oup ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 192 bytes of ciphertext ++openSSETail192: ++ // Need to decrypt up to 192 bytes - prepare three blocks ++ MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store ++ MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store ++ ++ MOVQ inl, itr1 ++ MOVQ $160, itr2 ++ CMPQ itr1, $160 ++ CMOVQGT itr2, itr1 ++ ANDQ $-16, itr1 ++ XORQ itr2, itr2 ++ ++openSSLTail192LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ++openSSLTail192LoopB: ++ ADDQ $16, itr2 ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ shiftB2Left; shiftC2Left; shiftD2Left ++ ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ shiftB2Right; shiftC2Right; shiftD2Right ++ ++ CMPQ itr2, itr1 ++ JB openSSLTail192LoopA ++ ++ CMPQ itr2, $160 ++ JNE openSSLTail192LoopB ++ ++ CMPQ inl, $176 ++ JB openSSLTail192Store ++ ++ polyAdd(160(inp)) ++ polyMul ++ ++ CMPQ inl, $192 ++ JB openSSLTail192Store ++ ++ polyAdd(176(inp)) ++ polyMul ++ ++openSSLTail192Store: ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 ++ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 ++ PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 ++ MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) ++ ++ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ LEAQ 128(oup), oup ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++openSSETail256: ++ // Need to decrypt up to 256 bytes - prepare four blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ XORQ itr2, itr2 ++ ++openSSETail256Loop: ++ // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication ++ 
polyAdd(0(inp)(itr2*1)) ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulStage3 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ ADDQ $2*8, itr2 ++ CMPQ itr2, $160 ++ JB openSSETail256Loop ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ ++openSSETail256HashLoop: ++ polyAdd(0(inp)(itr2*1)) ++ polyMul ++ ADDQ $2*8, itr2 ++ CMPQ itr2, itr1 ++ JB openSSETail256HashLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ MOVO D3, tmpStore ++ ++ // Load - xor - store ++ MOVOU (0*16)(inp), D3; PXOR D3, A0 ++ MOVOU (1*16)(inp), D3; PXOR D3, B0 ++ MOVOU (2*16)(inp), D3; PXOR D3, C0 ++ MOVOU (3*16)(inp), D3; PXOR D3, D0 ++ MOVOU A0, (0*16)(oup) ++ MOVOU B0, (1*16)(oup) ++ MOVOU C0, (2*16)(oup) ++ MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) ++ LEAQ 192(inp), inp ++ LEAQ 192(oup), oup ++ SUBQ $192, inl ++ MOVO A3, A0 ++ MOVO B3, B0 ++ MOVO C3, C0 ++ MOVO tmpStore, D0 ++ ++ JMP openSSETail64DecLoop ++ ++// ---------------------------------------------------------------------------- ++// ------------------------- AVX2 Code ---------------------------------------- ++chacha20Poly1305Open_AVX2: ++ VZEROUPPER ++ VMOVDQU ·chacha20Constants<>(SB), AA0 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 ++ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 ++ VPADDD ·avx2InitMask<>(SB), DD0, DD0 ++ ++ // Special optimization, for very short buffers ++ CMPQ inl, $192 ++ JBE openAVX2192 ++ CMPQ inl, $320 ++ JBE openAVX2320 ++ ++ // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream ++ VMOVDQA BB0, state1StoreAVX2 ++ VMOVDQA CC0, state2StoreAVX2 ++ VMOVDQA DD0, ctr3StoreAVX2 ++ MOVQ $10, itr2 ++ ++openAVX2PreparePolyKey: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; 
VPALIGNR $12, DD0, DD0, DD0 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ DECQ itr2 ++ JNE openAVX2PreparePolyKey ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0 ++ VPADDD state1StoreAVX2, BB0, BB0 ++ VPADDD state2StoreAVX2, CC0, CC0 ++ VPADDD ctr3StoreAVX2, DD0, DD0 ++ ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for the first 64 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ ++ // Hash AD + first 64 bytes ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++openAVX2InitialHash64: ++ polyAdd(0(inp)(itr1*1)) ++ polyMulAVX2 ++ ADDQ $16, itr1 ++ CMPQ itr1, $64 ++ JNE openAVX2InitialHash64 ++ ++ // Decrypt the first 64 bytes ++ VPXOR (0*32)(inp), AA0, AA0 ++ VPXOR (1*32)(inp), BB0, BB0 ++ VMOVDQU AA0, (0*32)(oup) ++ VMOVDQU BB0, (1*32)(oup) ++ LEAQ (2*32)(inp), inp ++ LEAQ (2*32)(oup), oup ++ SUBQ $64, inl ++ ++openAVX2MainLoop: ++ CMPQ inl, $512 ++ JB openAVX2MainLoopDone ++ ++ // Load state, increment counter blocks, store the incremented counters ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ XORQ itr1, itr1 ++ ++openAVX2InternalLoop: ++ // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications ++ // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext ++ polyAdd(0*8(inp)(itr1*1)) ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage1_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulStage2_AVX2 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyMulStage3_AVX2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ polyAdd(2*8(inp)(itr1*1)) ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage1_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD 
$7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage2_AVX2 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage3_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulReduceStage ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(4*8(inp)(itr1*1)) ++ LEAQ (6*8)(itr1), itr1 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage1_AVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ polyMulStage2_AVX2 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage3_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ CMPQ itr1, $480 ++ JNE openAVX2InternalLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ ++ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here ++ polyAdd(480(inp)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 
$0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ // and here ++ polyAdd(496(inp)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) ++ LEAQ (32*16)(inp), inp ++ LEAQ (32*16)(oup), oup ++ SUBQ $(32*16), inl ++ JMP openAVX2MainLoop ++ ++openAVX2MainLoopDone: ++ // Handle the various tail sizes efficiently ++ TESTQ inl, inl ++ JE openSSEFinalize ++ CMPQ inl, $128 ++ JBE openAVX2Tail128 ++ CMPQ inl, $256 ++ JBE openAVX2Tail256 ++ CMPQ inl, $384 ++ JBE openAVX2Tail384 ++ JMP openAVX2Tail512 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 193 bytes ++openAVX2192: ++ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks ++ VMOVDQA AA0, AA1 ++ VMOVDQA BB0, BB1 ++ VMOVDQA CC0, CC1 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2 ++ VMOVDQA BB0, BB2 ++ VMOVDQA CC0, CC2 ++ VMOVDQA DD0, DD2 ++ VMOVDQA DD1, TT3 ++ MOVQ $10, itr2 ++ ++openAVX2192InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr2 ++ JNE openAVX2192InnerCipherLoop ++ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 ++ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 ++ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 ++ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 192 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ ++openAVX2ShortOpen: ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL 
polyHashADInternal<>(SB) ++ ++openAVX2ShortOpenLoop: ++ CMPQ inl, $32 ++ JB openAVX2ShortTail32 ++ SUBQ $32, inl ++ ++ // Load for hashing ++ polyAdd(0*8(inp)) ++ polyMulAVX2 ++ polyAdd(2*8(inp)) ++ polyMulAVX2 ++ ++ // Load for decryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ LEAQ (1*32)(oup), oup ++ ++ // Shift stream left ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ VMOVDQA AA1, DD0 ++ VMOVDQA BB1, AA1 ++ VMOVDQA CC1, BB1 ++ VMOVDQA DD1, CC1 ++ VMOVDQA AA2, DD1 ++ VMOVDQA BB2, AA2 ++ JMP openAVX2ShortOpenLoop ++ ++openAVX2ShortTail32: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB openAVX2ShortDone ++ ++ SUBQ $16, inl ++ ++ // Load for hashing ++ polyAdd(0*8(inp)) ++ polyMulAVX2 ++ ++ // Load for decryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++openAVX2ShortDone: ++ VZEROUPPER ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 321 bytes ++openAVX2320: ++ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks ++ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 ++ MOVQ $10, itr2 ++ ++openAVX2320InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr2 ++ JNE openAVX2320InnerCipherLoop ++ ++ VMOVDQA ·chacha20Constants<>(SB), TT0 ++ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 ++ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 ++ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 ++ VMOVDQA ·avx2IncMask<>(SB), TT0 ++ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD2, DD2 ++ ++ // Clamp and store poly key ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 320 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ VPERM2I128 $0x02, AA2, BB2, CC1 ++ VPERM2I128 $0x02, CC2, DD2, DD1 ++ VPERM2I128 $0x13, AA2, BB2, AA2 ++ VPERM2I128 $0x13, CC2, DD2, BB2 ++ JMP openAVX2ShortOpen ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++openAVX2Tail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA1 ++ 
VMOVDQA state1StoreAVX2, BB1 ++ VMOVDQA state2StoreAVX2, CC1 ++ VMOVDQA ctr3StoreAVX2, DD1 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD1 ++ VMOVDQA DD1, DD0 ++ ++ XORQ itr2, itr2 ++ MOVQ inl, itr1 ++ ANDQ $-16, itr1 ++ TESTQ itr1, itr1 ++ JE openAVX2Tail128LoopB ++ ++openAVX2Tail128LoopA: ++ // Perform ChaCha rounds, while hashing the remaining input ++ polyAdd(0(inp)(itr2*1)) ++ polyMulAVX2 ++ ++openAVX2Tail128LoopB: ++ ADDQ $16, itr2 ++ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD1, DD1, DD1 ++ CMPQ itr2, itr1 ++ JB openAVX2Tail128LoopA ++ CMPQ itr2, $160 ++ JNE openAVX2Tail128LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD DD0, DD1, DD1 ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++openAVX2TailLoop: ++ CMPQ inl, $32 ++ JB openAVX2Tail ++ SUBQ $32, inl ++ ++ // Load for decryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ LEAQ (1*32)(oup), oup ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ JMP openAVX2TailLoop ++ ++openAVX2Tail: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB openAVX2TailDone ++ SUBQ $16, inl ++ ++ // Load for decryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++openAVX2TailDone: ++ VZEROUPPER ++ JMP openSSETail16 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++openAVX2Tail256: ++ // Need to decrypt up to 256 bytes - prepare four blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA DD0, TT1 ++ VMOVDQA DD1, TT2 ++ ++ // Compute the number of iterations that will hash data ++ MOVQ inl, tmpStoreAVX2 ++ MOVQ inl, itr1 ++ SUBQ $128, itr1 ++ SHRQ $4, itr1 ++ MOVQ $10, itr2 ++ CMPQ itr1, $10 ++ CMOVQGT itr2, itr1 ++ MOVQ inp, inl ++ XORQ itr2, itr2 ++ ++openAVX2Tail256LoopA: ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ ++ // Perform ChaCha rounds, while hashing the remaining input ++openAVX2Tail256LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ INCQ itr2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ CMPQ itr2, itr1 ++ JB openAVX2Tail256LoopA ++ ++ CMPQ itr2, $10 ++ JNE openAVX2Tail256LoopB ++ ++ MOVQ inl, itr2 ++ SUBQ inp, inl ++ MOVQ inl, itr1 ++ MOVQ tmpStoreAVX2, inl ++ ++ // Hash the remainder of data (if any) ++openAVX2Tail256Hash: ++ ADDQ $16, itr1 ++ CMPQ itr1, inl ++ JGT openAVX2Tail256HashEnd ++ polyAdd (0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ JMP openAVX2Tail256Hash ++ 
++// Store 128 bytes safely, then go to store loop ++openAVX2Tail256HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++ VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 ++ VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) ++ LEAQ (4*32)(inp), inp ++ LEAQ (4*32)(oup), oup ++ SUBQ $4*32, inl ++ ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 384 bytes of ciphertext ++openAVX2Tail384: ++ // Need to decrypt up to 384 bytes - prepare six blocks ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA DD0, ctr0StoreAVX2 ++ VMOVDQA DD1, ctr1StoreAVX2 ++ VMOVDQA DD2, ctr2StoreAVX2 ++ ++ // Compute the number of iterations that will hash two blocks of data ++ MOVQ inl, tmpStoreAVX2 ++ MOVQ inl, itr1 ++ SUBQ $256, itr1 ++ SHRQ $4, itr1 ++ ADDQ $6, itr1 ++ MOVQ $10, itr2 ++ CMPQ itr1, $10 ++ CMOVQGT itr2, itr1 ++ MOVQ inp, inl ++ XORQ itr2, itr2 ++ ++ // Perform ChaCha rounds, while hashing the remaining input ++openAVX2Tail384LoopB: ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ ++openAVX2Tail384LoopA: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ polyAdd(0(inl)) ++ polyMulAVX2 ++ LEAQ 16(inl), inl ++ INCQ itr2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ ++ CMPQ itr2, itr1 ++ JB openAVX2Tail384LoopB ++ ++ CMPQ itr2, $10 ++ JNE openAVX2Tail384LoopA ++ ++ MOVQ inl, itr2 ++ SUBQ inp, inl ++ MOVQ inl, itr1 ++ MOVQ tmpStoreAVX2, inl ++ ++openAVX2Tail384Hash: ++ ADDQ $16, itr1 ++ CMPQ itr1, inl ++ JGT openAVX2Tail384HashEnd ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ JMP openAVX2Tail384Hash ++ ++// Store 256 bytes safely, then go to store loop ++openAVX2Tail384HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, 
BB2, BB2 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 ++ VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 ++ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ LEAQ (8*32)(inp), inp ++ LEAQ (8*32)(oup), oup ++ SUBQ $8*32, inl ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 512 bytes of ciphertext ++openAVX2Tail512: ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ XORQ itr1, itr1 ++ MOVQ inp, itr2 ++ ++openAVX2Tail512LoopB: ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ (2*8)(itr2), itr2 ++ ++openAVX2Tail512LoopA: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyAdd(0*8(itr2)) ++ polyMulAVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, 
BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(2*8(itr2)) ++ polyMulAVX2 ++ LEAQ (4*8)(itr2), itr2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ INCQ itr1 ++ CMPQ itr1, $4 ++ JLT openAVX2Tail512LoopB ++ ++ CMPQ itr1, $10 ++ JNE openAVX2Tail512LoopA ++ ++ MOVQ inl, itr1 ++ SUBQ $384, itr1 ++ ANDQ $-16, itr1 ++ ++openAVX2Tail512HashLoop: ++ TESTQ itr1, itr1 ++ JE openAVX2Tail512HashEnd ++ polyAdd(0(itr2)) ++ polyMulAVX2 ++ LEAQ 16(itr2), itr2 ++ SUBQ $16, itr1 ++ JMP openAVX2Tail512HashLoop ++ ++openAVX2Tail512HashEnd: ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; 
VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ ++ LEAQ (12*32)(inp), inp ++ LEAQ (12*32)(oup), oup ++ SUBQ $12*32, inl ++ ++ JMP openAVX2TailLoop ++ ++// ---------------------------------------------------------------------------- ++// ---------------------------------------------------------------------------- ++// func chacha20Poly1305Seal(dst, key, src, ad []byte) ++TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 ++ // For aligned stack access ++ MOVQ SP, BP ++ ADDQ $32, BP ++ ANDQ $-32, BP ++ MOVQ dst+0(FP), oup ++ MOVQ key+24(FP), keyp ++ MOVQ src+48(FP), inp ++ MOVQ src_len+56(FP), inl ++ MOVQ ad+72(FP), adp ++ ++ CMPB ·useAVX2(SB), $1 ++ JE chacha20Poly1305Seal_AVX2 ++ ++ // Special optimization, for very short buffers ++ CMPQ inl, $128 ++ JBE sealSSE128 // About 15% faster ++ ++ // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration ++ MOVOU ·chacha20Constants<>(SB), A0 ++ MOVOU (1*16)(keyp), B0 ++ MOVOU (2*16)(keyp), C0 ++ MOVOU (3*16)(keyp), D0 ++ ++ // Store state on stack for future use ++ MOVO B0, state1Store ++ MOVO C0, state2Store ++ ++ // Load state, increment counter blocks ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ MOVQ $10, itr2 ++ ++sealSSEIntroLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr2 ++ JNE sealSSEIntroLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD 
·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ ++ // Clamp and store the key ++ PAND ·polyClampMask<>(SB), A0 ++ MOVO A0, rStore ++ MOVO B0, sStore ++ ++ // Hash AAD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) ++ ++ MOVQ $128, itr1 ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ ++ MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 ++ ++ CMPQ inl, $64 ++ JBE sealSSE128SealHash ++ ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 ++ MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) ++ ++ ADDQ $64, itr1 ++ SUBQ $64, inl ++ LEAQ 64(inp), inp ++ ++ MOVQ $2, itr1 ++ MOVQ $8, itr2 ++ ++ CMPQ inl, $64 ++ JBE sealSSETail64 ++ CMPQ inl, $128 ++ JBE sealSSETail128 ++ CMPQ inl, $192 ++ JBE sealSSETail192 ++ ++sealSSEMainLoop: ++ // Load state, increment counter blocks ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 ++ ++ // Store counters ++ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store ++ ++sealSSEInnerLoop: ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyAdd(0(oup)) ++ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left ++ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left ++ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left ++ polyMulStage1 ++ polyMulStage2 ++ LEAQ (2*8)(oup), oup ++ MOVO C3, tmpStore ++ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) ++ MOVO tmpStore, C3 ++ MOVO C1, tmpStore ++ polyMulStage3 ++ chachaQR(A3, B3, C3, D3, C1) ++ MOVO tmpStore, C1 ++ polyMulReduceStage ++ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right ++ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right ++ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right ++ DECQ itr2 ++ JGE sealSSEInnerLoop ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ (2*8)(oup), oup ++ DECQ itr1 ++ JG sealSSEInnerLoop ++ ++ // Add in the state ++ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 ++ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 ++ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 ++ PADDD ctr0Store, D0; PADDD 
ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 ++ MOVO D3, tmpStore ++ ++ // Load - xor - store ++ MOVOU (0*16)(inp), D3; PXOR D3, A0 ++ MOVOU (1*16)(inp), D3; PXOR D3, B0 ++ MOVOU (2*16)(inp), D3; PXOR D3, C0 ++ MOVOU (3*16)(inp), D3; PXOR D3, D0 ++ MOVOU A0, (0*16)(oup) ++ MOVOU B0, (1*16)(oup) ++ MOVOU C0, (2*16)(oup) ++ MOVOU D0, (3*16)(oup) ++ MOVO tmpStore, D3 ++ ++ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 ++ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 ++ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 ++ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) ++ ADDQ $192, inp ++ MOVQ $192, itr1 ++ SUBQ $192, inl ++ MOVO A3, A1 ++ MOVO B3, B1 ++ MOVO C3, C1 ++ MOVO D3, D1 ++ CMPQ inl, $64 ++ JBE sealSSE128SealHash ++ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 ++ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 ++ MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) ++ LEAQ 64(inp), inp ++ SUBQ $64, inl ++ MOVQ $6, itr1 ++ MOVQ $4, itr2 ++ CMPQ inl, $192 ++ JG sealSSEMainLoop ++ ++ MOVQ inl, itr1 ++ TESTQ inl, inl ++ JE sealSSE128SealHash ++ MOVQ $6, itr1 ++ CMPQ inl, $64 ++ JBE sealSSETail64 ++ CMPQ inl, $128 ++ JBE sealSSETail128 ++ JMP sealSSETail192 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 64 bytes of plaintext ++sealSSETail64: ++ // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A1 ++ MOVO state1Store, B1 ++ MOVO state2Store, C1 ++ MOVO ctr3Store, D1 ++ PADDL ·sseIncMask<>(SB), D1 ++ MOVO D1, ctr0Store ++ ++sealSSETail64LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail64LoopB: ++ chachaQR(A1, B1, C1, D1, T1) ++ shiftB1Left; shiftC1Left; shiftD1Left ++ chachaQR(A1, B1, C1, D1, T1) ++ shiftB1Right; shiftC1Right; shiftD1Right ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++ DECQ itr1 ++ JG sealSSETail64LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail64LoopB ++ PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B1 ++ PADDL state2Store, C1 ++ PADDL ctr0Store, D1 ++ ++ JMP sealSSE128Seal ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of plaintext ++sealSSETail128: ++ // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ ++sealSSETail128LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail128LoopB: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right 
++ shiftB1Right; shiftC1Right; shiftD1Right ++ ++ DECQ itr1 ++ JG sealSSETail128LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail128LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 ++ PADDL state1Store, B0; PADDL state1Store, B1 ++ PADDL state2Store, C0; PADDL state2Store, C1 ++ PADDL ctr0Store, D0; PADDL ctr1Store, D1 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 ++ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) ++ ++ MOVQ $64, itr1 ++ LEAQ 64(inp), inp ++ SUBQ $64, inl ++ ++ JMP sealSSE128SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 192 bytes of plaintext ++sealSSETail192: ++ // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes ++ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store ++ ++sealSSETail192LoopA: ++ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealSSETail192LoopB: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftC0Left; shiftD0Left ++ shiftB1Left; shiftC1Left; shiftD1Left ++ shiftB2Left; shiftC2Left; shiftD2Left ++ ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftC0Right; shiftD0Right ++ shiftB1Right; shiftC1Right; shiftD1Right ++ shiftB2Right; shiftC2Right; shiftD2Right ++ ++ DECQ itr1 ++ JG sealSSETail192LoopA ++ ++ DECQ itr2 ++ JGE sealSSETail192LoopB ++ ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 ++ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 ++ PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 ++ ++ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 ++ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 ++ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) ++ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 ++ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 ++ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) ++ ++ MOVO A2, A1 ++ MOVO B2, B1 ++ MOVO C2, C1 ++ MOVO D2, D1 ++ MOVQ $128, itr1 ++ LEAQ 128(inp), inp ++ SUBQ $128, inl ++ ++ JMP sealSSE128SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special seal optimization for buffers smaller than 129 bytes ++sealSSE128: ++ // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks ++ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 ++ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 ++ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 ++ 
MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 ++ MOVQ $10, itr2 ++ ++sealSSE128InnerCipherLoop: ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Left; shiftB1Left; shiftB2Left ++ shiftC0Left; shiftC1Left; shiftC2Left ++ shiftD0Left; shiftD1Left; shiftD2Left ++ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) ++ shiftB0Right; shiftB1Right; shiftB2Right ++ shiftC0Right; shiftC1Right; shiftC2Right ++ shiftD0Right; shiftD1Right; shiftD2Right ++ DECQ itr2 ++ JNE sealSSE128InnerCipherLoop ++ ++ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded ++ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 ++ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 ++ PADDL T2, C1; PADDL T2, C2 ++ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 ++ PAND ·polyClampMask<>(SB), A0 ++ MOVOU A0, rStore ++ MOVOU B0, sStore ++ ++ // Hash ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++sealSSE128SealHash: ++ // itr1 holds the number of bytes encrypted but not yet hashed ++ CMPQ itr1, $16 ++ JB sealSSE128Seal ++ polyAdd(0(oup)) ++ polyMul ++ ++ SUBQ $16, itr1 ++ ADDQ $16, oup ++ ++ JMP sealSSE128SealHash ++ ++sealSSE128Seal: ++ CMPQ inl, $16 ++ JB sealSSETail ++ SUBQ $16, inl ++ ++ // Load for decryption ++ MOVOU (inp), T0 ++ PXOR T0, A1 ++ MOVOU A1, (oup) ++ LEAQ (1*16)(inp), inp ++ LEAQ (1*16)(oup), oup ++ ++ // Extract for hashing ++ MOVQ A1, t0 ++ PSRLDQ $8, A1 ++ MOVQ A1, t1 ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ // Shift the stream "left" ++ MOVO B1, A1 ++ MOVO C1, B1 ++ MOVO D1, C1 ++ MOVO A2, D1 ++ MOVO B2, A2 ++ MOVO C2, B2 ++ MOVO D2, C2 ++ JMP sealSSE128Seal ++ ++sealSSETail: ++ TESTQ inl, inl ++ JE sealSSEFinalize ++ ++ // We can only load the PT one byte at a time to avoid read after end of buffer ++ MOVQ inl, itr2 ++ SHLQ $4, itr2 ++ LEAQ ·andMask<>(SB), t0 ++ MOVQ inl, itr1 ++ LEAQ -1(inp)(inl*1), inp ++ XORQ t2, t2 ++ XORQ t3, t3 ++ XORQ AX, AX ++ ++sealSSETailLoadLoop: ++ SHLQ $8, t2, t3 ++ SHLQ $8, t2 ++ MOVB (inp), AX ++ XORQ AX, t2 ++ LEAQ -1(inp), inp ++ DECQ itr1 ++ JNE sealSSETailLoadLoop ++ MOVQ t2, 0+tmpStore ++ MOVQ t3, 8+tmpStore ++ PXOR 0+tmpStore, A1 ++ MOVOU A1, (oup) ++ MOVOU -16(t0)(itr2*1), T0 ++ PAND T0, A1 ++ MOVQ A1, t0 ++ PSRLDQ $8, A1 ++ MOVQ A1, t1 ++ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 ++ polyMul ++ ++ ADDQ inl, oup ++ ++sealSSEFinalize: ++ // Hash in the buffer lengths ++ ADDQ ad_len+80(FP), acc0 ++ ADCQ src_len+56(FP), acc1 ++ ADCQ $1, acc2 ++ polyMul ++ ++ // Final reduce ++ MOVQ acc0, t0 ++ MOVQ acc1, t1 ++ MOVQ acc2, t2 ++ SUBQ $-5, acc0 ++ SBBQ $-1, acc1 ++ SBBQ $3, acc2 ++ CMOVQCS t0, acc0 ++ CMOVQCS t1, acc1 ++ CMOVQCS t2, acc2 ++ ++ // Add in the "s" part of the key ++ ADDQ 0+sStore, acc0 ++ ADCQ 8+sStore, acc1 ++ ++ // Finally store the tag at the end of the message ++ MOVQ acc0, (0*8)(oup) ++ MOVQ acc1, (1*8)(oup) ++ RET ++ ++// ---------------------------------------------------------------------------- ++// ------------------------- AVX2 Code ---------------------------------------- ++chacha20Poly1305Seal_AVX2: ++ VZEROUPPER ++ VMOVDQU ·chacha20Constants<>(SB), AA0 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 ++ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 ++ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // 
broadcasti128 48(r8), ymm4 ++ VPADDD ·avx2InitMask<>(SB), DD0, DD0 ++ ++ // Special optimizations, for very short buffers ++ CMPQ inl, $192 ++ JBE seal192AVX2 // 33% faster ++ CMPQ inl, $320 ++ JBE seal320AVX2 // 17% faster ++ ++ // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream ++ VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 ++ VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 ++ VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 ++ VMOVDQA DD3, ctr3StoreAVX2 ++ MOVQ $10, itr2 ++ ++sealAVX2IntroLoop: ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 ++ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 ++ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 ++ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 ++ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 ++ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 ++ DECQ itr2 ++ JNE sealAVX2IntroLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ ++ VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 ++ VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key ++ VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), DD0, DD0 ++ VMOVDQA DD0, rsStoreAVX2 ++ ++ // Hash AD ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ ++ // Can store at least 320 bytes ++ VPXOR (0*32)(inp), AA0, AA0 ++ VPXOR (1*32)(inp), CC0, CC0 ++ VMOVDQU AA0, (0*32)(oup) ++ VMOVDQU CC0, (1*32)(oup) ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, 
(4*32)(oup); VMOVDQU DD0, (5*32)(oup) ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) ++ ++ MOVQ $320, itr1 ++ SUBQ $320, inl ++ LEAQ 320(inp), inp ++ ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 ++ CMPQ inl, $128 ++ JBE sealAVX2SealHash ++ ++ VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) ++ SUBQ $128, inl ++ LEAQ 128(inp), inp ++ ++ MOVQ $8, itr1 ++ MOVQ $2, itr2 ++ ++ CMPQ inl, $128 ++ JBE sealAVX2Tail128 ++ CMPQ inl, $256 ++ JBE sealAVX2Tail256 ++ CMPQ inl, $384 ++ JBE sealAVX2Tail384 ++ CMPQ inl, $512 ++ JBE sealAVX2Tail512 ++ ++ // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 ++ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 ++ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 ++ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 ++ ++ VMOVDQA CC3, tmpStoreAVX2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) ++ VMOVDQA tmpStoreAVX2, CC3 ++ VMOVDQA CC1, tmpStoreAVX2 ++ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) ++ VMOVDQA tmpStoreAVX2, CC1 ++ ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 ++ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 ++ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 ++ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA 
CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ ++ SUBQ $16, oup // Adjust the pointer ++ MOVQ $9, itr1 ++ JMP sealAVX2InternalLoopStart ++ ++sealAVX2MainLoop: ++ // Load state, increment counter blocks, store the incremented counters ++ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ MOVQ $10, itr1 ++ ++sealAVX2InternalLoop: ++ polyAdd(0*8(oup)) ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage1_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulStage2_AVX2 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyMulStage3_AVX2 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ ++sealAVX2InternalLoopStart: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ polyAdd(2*8(oup)) ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage1_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage2_AVX2 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ polyMulStage3_AVX2 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB 
·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ polyMulReduceStage ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(4*8(oup)) ++ LEAQ (6*8)(oup), oup ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulStage1_AVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ polyMulStage2_AVX2 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ polyMulStage3_AVX2 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyMulReduceStage ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ DECQ itr1 ++ JNE sealAVX2InternalLoop ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ ++ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ LEAQ (4*8)(oup), oup ++ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 ++ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 ++ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ // and here ++ polyAdd(-2*8(oup)) ++ polyMulAVX2 ++ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, 
DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) ++ LEAQ (32*16)(inp), inp ++ SUBQ $(32*16), inl ++ CMPQ inl, $512 ++ JG sealAVX2MainLoop ++ ++ // Tail can only hash 480 bytes ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ 32(oup), oup ++ ++ MOVQ $10, itr1 ++ MOVQ $0, itr2 ++ CMPQ inl, $128 ++ JBE sealAVX2Tail128 ++ CMPQ inl, $256 ++ JBE sealAVX2Tail256 ++ CMPQ inl, $384 ++ JBE sealAVX2Tail384 ++ JMP sealAVX2Tail512 ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 193 bytes ++seal192AVX2: ++ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks ++ VMOVDQA AA0, AA1 ++ VMOVDQA BB0, BB1 ++ VMOVDQA CC0, CC1 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2 ++ VMOVDQA BB0, BB2 ++ VMOVDQA CC0, CC2 ++ VMOVDQA DD0, DD2 ++ VMOVDQA DD1, TT3 ++ MOVQ $10, itr2 ++ ++sealAVX2192InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr2 ++ JNE sealAVX2192InnerCipherLoop ++ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 ++ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 ++ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 ++ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ ++ // Clamp and store poly key ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 192 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ ++sealAVX2ShortSeal: ++ // Hash aad ++ MOVQ ad_len+80(FP), itr2 ++ CALL polyHashADInternal<>(SB) ++ XORQ itr1, itr1 ++ ++sealAVX2SealHash: ++ // itr1 holds the number of bytes encrypted but not yet hashed ++ CMPQ itr1, $16 ++ JB sealAVX2ShortSealLoop ++ polyAdd(0(oup)) ++ polyMul ++ SUBQ $16, itr1 ++ ADDQ $16, oup ++ JMP sealAVX2SealHash ++ ++sealAVX2ShortSealLoop: ++ CMPQ inl, $32 ++ JB sealAVX2ShortTail32 ++ SUBQ $32, inl ++ ++ // Load for encryption ++ VPXOR (inp), AA0, AA0 ++ VMOVDQU AA0, (oup) ++ LEAQ (1*32)(inp), inp ++ ++ // Now can hash ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ (1*32)(oup), oup ++ ++ // Shift stream left ++ VMOVDQA BB0, AA0 ++ VMOVDQA CC0, BB0 ++ VMOVDQA DD0, CC0 ++ VMOVDQA AA1, DD0 ++ VMOVDQA BB1, AA1 ++ VMOVDQA CC1, BB1 ++ VMOVDQA DD1, CC1 ++ VMOVDQA AA2, DD1 ++ VMOVDQA BB2, AA2 ++ JMP sealAVX2ShortSealLoop ++ 
++sealAVX2ShortTail32: ++ CMPQ inl, $16 ++ VMOVDQA A0, A1 ++ JB sealAVX2ShortDone ++ ++ SUBQ $16, inl ++ ++ // Load for encryption ++ VPXOR (inp), A0, T0 ++ VMOVDQU T0, (oup) ++ LEAQ (1*16)(inp), inp ++ ++ // Hash ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ LEAQ (1*16)(oup), oup ++ VPERM2I128 $0x11, AA0, AA0, AA0 ++ VMOVDQA A0, A1 ++ ++sealAVX2ShortDone: ++ VZEROUPPER ++ JMP sealSSETail ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for buffers smaller than 321 bytes ++seal320AVX2: ++ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks ++ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 ++ MOVQ $10, itr2 ++ ++sealAVX2320InnerCipherLoop: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr2 ++ JNE sealAVX2320InnerCipherLoop ++ ++ VMOVDQA ·chacha20Constants<>(SB), TT0 ++ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 ++ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 ++ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 ++ VMOVDQA ·avx2IncMask<>(SB), TT0 ++ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 ++ VPADDD TT3, DD2, DD2 ++ ++ // Clamp and store poly key ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPAND ·polyClampMask<>(SB), TT0, TT0 ++ VMOVDQA TT0, rsStoreAVX2 ++ ++ // Stream for up to 320 bytes ++ VPERM2I128 $0x13, AA0, BB0, AA0 ++ VPERM2I128 $0x13, CC0, DD0, BB0 ++ VPERM2I128 $0x02, AA1, BB1, CC0 ++ VPERM2I128 $0x02, CC1, DD1, DD0 ++ VPERM2I128 $0x13, AA1, BB1, AA1 ++ VPERM2I128 $0x13, CC1, DD1, BB1 ++ VPERM2I128 $0x02, AA2, BB2, CC1 ++ VPERM2I128 $0x02, CC2, DD2, DD1 ++ VPERM2I128 $0x13, AA2, BB2, AA2 ++ VPERM2I128 $0x13, CC2, DD2, BB2 ++ JMP sealAVX2ShortSeal ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 128 bytes of ciphertext ++sealAVX2Tail128: ++ // Need to decrypt up to 128 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0 ++ VMOVDQA state1StoreAVX2, BB0 ++ VMOVDQA state2StoreAVX2, CC0 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VMOVDQA DD0, DD1 ++ ++sealAVX2Tail128LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail128LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0 ++ VPALIGNR $8, CC0, CC0, CC0 ++ VPALIGNR $12, 
DD0, DD0, DD0 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0 ++ VPALIGNR $8, CC0, CC0, CC0 ++ VPALIGNR $4, DD0, DD0, DD0 ++ DECQ itr1 ++ JG sealAVX2Tail128LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail128LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA1 ++ VPADDD state1StoreAVX2, BB0, BB1 ++ VPADDD state2StoreAVX2, CC0, CC1 ++ VPADDD DD1, DD0, DD1 ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ JMP sealAVX2ShortSealLoop ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 256 bytes of ciphertext ++sealAVX2Tail256: ++ // Need to decrypt up to 256 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD1 ++ VMOVDQA DD0, TT1 ++ VMOVDQA DD1, TT2 ++ ++sealAVX2Tail256LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail256LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 ++ DECQ itr1 ++ JG sealAVX2Tail256LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail256LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPERM2I128 $0x02, CC0, DD0, TT1 ++ VPERM2I128 $0x13, AA0, BB0, TT2 ++ VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ MOVQ $128, itr1 ++ LEAQ 128(inp), inp ++ SUBQ $128, inl ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ ++ JMP sealAVX2SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 384 bytes of ciphertext ++sealAVX2Tail384: ++ // Need to decrypt up to 384 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, 
BB1; VMOVDQA BB0, BB2 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 ++ VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 ++ ++sealAVX2Tail384LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail384LoopB: ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ polyAdd(0(oup)) ++ polyMul ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 ++ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) ++ polyAdd(16(oup)) ++ polyMul ++ LEAQ 32(oup), oup ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 ++ DECQ itr1 ++ JG sealAVX2Tail384LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail384LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 ++ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 ++ VPERM2I128 $0x02, AA0, BB0, TT0 ++ VPERM2I128 $0x02, CC0, DD0, TT1 ++ VPERM2I128 $0x13, AA0, BB0, TT2 ++ VPERM2I128 $0x13, CC0, DD0, TT3 ++ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) ++ VPERM2I128 $0x02, AA1, BB1, TT0 ++ VPERM2I128 $0x02, CC1, DD1, TT1 ++ VPERM2I128 $0x13, AA1, BB1, TT2 ++ VPERM2I128 $0x13, CC1, DD1, TT3 ++ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 ++ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) ++ MOVQ $256, itr1 ++ LEAQ 256(inp), inp ++ SUBQ $256, inl ++ VPERM2I128 $0x02, AA2, BB2, AA0 ++ VPERM2I128 $0x02, CC2, DD2, BB0 ++ VPERM2I128 $0x13, AA2, BB2, CC0 ++ VPERM2I128 $0x13, CC2, DD2, DD0 ++ ++ JMP sealAVX2SealHash ++ ++// ---------------------------------------------------------------------------- ++// Special optimization for the last 512 bytes of ciphertext ++sealAVX2Tail512: ++ // Need to decrypt up to 512 bytes - prepare two blocks ++ // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed ++ // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed ++ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 ++ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 ++ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 ++ VMOVDQA ctr3StoreAVX2, DD0 ++ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 ++ 
VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 ++ ++sealAVX2Tail512LoopA: ++ polyAdd(0(oup)) ++ polyMul ++ LEAQ 16(oup), oup ++ ++sealAVX2Tail512LoopB: ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ polyAdd(0*8(oup)) ++ polyMulAVX2 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ polyAdd(2*8(oup)) ++ polyMulAVX2 ++ LEAQ (4*8)(oup), oup ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 ++ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 ++ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 ++ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 ++ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 ++ 
VMOVDQA CC3, tmpStoreAVX2 ++ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 ++ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 ++ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 ++ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 ++ VMOVDQA tmpStoreAVX2, CC3 ++ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 ++ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 ++ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 ++ ++ DECQ itr1 ++ JG sealAVX2Tail512LoopA ++ DECQ itr2 ++ JGE sealAVX2Tail512LoopB ++ ++ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 ++ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 ++ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 ++ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 ++ VMOVDQA CC3, tmpStoreAVX2 ++ VPERM2I128 $0x02, AA0, BB0, CC3 ++ VPXOR (0*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (0*32)(oup) ++ VPERM2I128 $0x02, CC0, DD0, CC3 ++ VPXOR (1*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (1*32)(oup) ++ VPERM2I128 $0x13, AA0, BB0, CC3 ++ VPXOR (2*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (2*32)(oup) ++ VPERM2I128 $0x13, CC0, DD0, CC3 ++ VPXOR (3*32)(inp), CC3, CC3 ++ VMOVDQU CC3, (3*32)(oup) ++ ++ VPERM2I128 $0x02, AA1, BB1, AA0 ++ VPERM2I128 $0x02, CC1, DD1, BB0 ++ VPERM2I128 $0x13, AA1, BB1, CC0 ++ VPERM2I128 $0x13, CC1, DD1, DD0 ++ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) ++ ++ VPERM2I128 $0x02, AA2, BB2, AA0 ++ VPERM2I128 $0x02, CC2, DD2, BB0 ++ VPERM2I128 $0x13, AA2, BB2, CC0 ++ VPERM2I128 $0x13, CC2, DD2, DD0 ++ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 ++ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) ++ ++ MOVQ $384, itr1 ++ LEAQ 384(inp), inp ++ SUBQ $384, inl ++ VPERM2I128 $0x02, AA3, BB3, AA0 ++ VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 ++ VPERM2I128 $0x13, AA3, BB3, CC0 ++ VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 ++ ++ JMP sealAVX2SealHash +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +new file mode 100644 +index 00000000000..6313898f0a7 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +@@ -0,0 +1,81 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package chacha20poly1305 ++ ++import ( ++ "encoding/binary" ++ ++ "golang.org/x/crypto/chacha20" ++ "golang.org/x/crypto/internal/alias" ++ "golang.org/x/crypto/internal/poly1305" ++) ++ ++func writeWithPadding(p *poly1305.MAC, b []byte) { ++ p.Write(b) ++ if rem := len(b) % 16; rem != 0 { ++ var buf [16]byte ++ padLen := 16 - rem ++ p.Write(buf[:padLen]) ++ } ++} ++ ++func writeUint64(p *poly1305.MAC, n int) { ++ var buf [8]byte ++ binary.LittleEndian.PutUint64(buf[:], uint64(n)) ++ p.Write(buf[:]) ++} ++ ++func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { ++ ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) ++ ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] ++ if alias.InexactOverlap(out, plaintext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ ++ var polyKey [32]byte ++ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) ++ s.XORKeyStream(polyKey[:], polyKey[:]) ++ s.SetCounter(1) // set the counter to 1, skipping 32 bytes ++ s.XORKeyStream(ciphertext, plaintext) ++ ++ p := poly1305.New(&polyKey) ++ writeWithPadding(p, additionalData) ++ writeWithPadding(p, ciphertext) ++ writeUint64(p, len(additionalData)) ++ writeUint64(p, len(plaintext)) ++ p.Sum(tag[:0]) ++ ++ return ret ++} ++ ++func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ tag := ciphertext[len(ciphertext)-16:] ++ ciphertext = ciphertext[:len(ciphertext)-16] ++ ++ var polyKey [32]byte ++ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) ++ s.XORKeyStream(polyKey[:], polyKey[:]) ++ s.SetCounter(1) // set the counter to 1, skipping 32 bytes ++ ++ p := poly1305.New(&polyKey) ++ writeWithPadding(p, additionalData) ++ writeWithPadding(p, ciphertext) ++ writeUint64(p, len(additionalData)) ++ writeUint64(p, len(ciphertext)) ++ ++ ret, out := sliceForAppend(dst, len(ciphertext)) ++ if alias.InexactOverlap(out, ciphertext) { ++ panic("chacha20poly1305: invalid buffer overlap") ++ } ++ if !p.Verify(tag) { ++ for i := range out { ++ out[i] = 0 ++ } ++ return nil, errOpen ++ } ++ ++ s.XORKeyStream(out, ciphertext) ++ return ret, nil ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +new file mode 100644 +index 00000000000..34e6ab1df88 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +@@ -0,0 +1,15 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build !amd64 || !gc || purego ++ ++package chacha20poly1305 ++ ++func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ return c.sealGeneric(dst, nonce, plaintext, additionalData) ++} ++ ++func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ return c.openGeneric(dst, nonce, ciphertext, additionalData) ++} +diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +new file mode 100644 +index 00000000000..1cebfe946f4 +--- /dev/null ++++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +@@ -0,0 +1,86 @@ ++// Copyright 2018 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
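Note (illustration, not part of the vendored patch): the generic seal/open paths above, and the assembly fast path, sit behind the package's cipher.AEAD constructors. A minimal caller-side sketch of the public golang.org/x/crypto/chacha20poly1305 API; the message and additional-data values are placeholders:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	// Standard variant: 12-byte nonce, which must never repeat for a key.
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, chacha20poly1305.NonceSize)
	ct := aead.Seal(nil, nonce, []byte("example message"), []byte("header"))
	pt, err := aead.Open(nil, nonce, ct, []byte("header"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)

	// XChaCha20-Poly1305 (the file that follows): 24-byte nonce, safe to
	// draw at random.
	x, err := chacha20poly1305.NewX(key)
	if err != nil {
		panic(err)
	}
	xnonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(xnonce); err != nil {
		panic(err)
	}
	fmt.Println("xchacha ciphertext bytes:", len(x.Seal(nil, xnonce, []byte("example message"), nil)))
}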
++ ++package chacha20poly1305 ++ ++import ( ++ "crypto/cipher" ++ "errors" ++ ++ "golang.org/x/crypto/chacha20" ++) ++ ++type xchacha20poly1305 struct { ++ key [KeySize]byte ++} ++ ++// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. ++// ++// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, ++// suitable to be generated randomly without risk of collisions. It should be ++// preferred when nonce uniqueness cannot be trivially ensured, or whenever ++// nonces are randomly generated. ++func NewX(key []byte) (cipher.AEAD, error) { ++ if len(key) != KeySize { ++ return nil, errors.New("chacha20poly1305: bad key length") ++ } ++ ret := new(xchacha20poly1305) ++ copy(ret.key[:], key) ++ return ret, nil ++} ++ ++func (*xchacha20poly1305) NonceSize() int { ++ return NonceSizeX ++} ++ ++func (*xchacha20poly1305) Overhead() int { ++ return Overhead ++} ++ ++func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { ++ if len(nonce) != NonceSizeX { ++ panic("chacha20poly1305: bad nonce length passed to Seal") ++ } ++ ++ // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no ++ // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, ++ // the second half of the counter is not available. This is unlikely to be ++ // an issue because the cipher.AEAD API requires the entire message to be in ++ // memory, and the counter overflows at 256 GB. ++ if uint64(len(plaintext)) > (1<<38)-64 { ++ panic("chacha20poly1305: plaintext too large") ++ } ++ ++ c := new(chacha20poly1305) ++ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) ++ copy(c.key[:], hKey) ++ ++ // The first 4 bytes of the final nonce are unused counter space. ++ cNonce := make([]byte, NonceSize) ++ copy(cNonce[4:12], nonce[16:24]) ++ ++ return c.seal(dst, cNonce[:], plaintext, additionalData) ++} ++ ++func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { ++ if len(nonce) != NonceSizeX { ++ panic("chacha20poly1305: bad nonce length passed to Open") ++ } ++ if len(ciphertext) < 16 { ++ return nil, errOpen ++ } ++ if uint64(len(ciphertext)) > (1<<38)-48 { ++ panic("chacha20poly1305: ciphertext too large") ++ } ++ ++ c := new(chacha20poly1305) ++ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) ++ copy(c.key[:], hKey) ++ ++ // The first 4 bytes of the final nonce are unused counter space. ++ cNonce := make([]byte, NonceSize) ++ copy(cNonce[4:12], nonce[16:24]) ++ ++ return c.open(dst, cNonce[:], ciphertext, additionalData) ++} +diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go +new file mode 100644 +index 00000000000..f4ded5fee2f +--- /dev/null ++++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go +@@ -0,0 +1,95 @@ ++// Copyright 2014 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation ++// Function (HKDF) as defined in RFC 5869. ++// ++// HKDF is a cryptographic key derivation function (KDF) with the goal of ++// expanding limited input keying material into one or more cryptographically ++// strong secret keys. ++package hkdf // import "golang.org/x/crypto/hkdf" ++ ++import ( ++ "crypto/hmac" ++ "errors" ++ "hash" ++ "io" ++) ++ ++// Extract generates a pseudorandom key for use with Expand from an input secret ++// and an optional independent salt. 
++// ++// Only use this function if you need to reuse the extracted key with multiple ++// Expand invocations and different context values. Most common scenarios, ++// including the generation of multiple keys, should use New instead. ++func Extract(hash func() hash.Hash, secret, salt []byte) []byte { ++ if salt == nil { ++ salt = make([]byte, hash().Size()) ++ } ++ extractor := hmac.New(hash, salt) ++ extractor.Write(secret) ++ return extractor.Sum(nil) ++} ++ ++type hkdf struct { ++ expander hash.Hash ++ size int ++ ++ info []byte ++ counter byte ++ ++ prev []byte ++ buf []byte ++} ++ ++func (f *hkdf) Read(p []byte) (int, error) { ++ // Check whether enough data can be generated ++ need := len(p) ++ remains := len(f.buf) + int(255-f.counter+1)*f.size ++ if remains < need { ++ return 0, errors.New("hkdf: entropy limit reached") ++ } ++ // Read any leftover from the buffer ++ n := copy(p, f.buf) ++ p = p[n:] ++ ++ // Fill the rest of the buffer ++ for len(p) > 0 { ++ if f.counter > 1 { ++ f.expander.Reset() ++ } ++ f.expander.Write(f.prev) ++ f.expander.Write(f.info) ++ f.expander.Write([]byte{f.counter}) ++ f.prev = f.expander.Sum(f.prev[:0]) ++ f.counter++ ++ ++ // Copy the new batch into p ++ f.buf = f.prev ++ n = copy(p, f.buf) ++ p = p[n:] ++ } ++ // Save leftovers for next run ++ f.buf = f.buf[n:] ++ ++ return need, nil ++} ++ ++// Expand returns a Reader, from which keys can be read, using the given ++// pseudorandom key and optional context info, skipping the extraction step. ++// ++// The pseudorandomKey should have been generated by Extract, or be a uniformly ++// random or pseudorandom cryptographically strong key. See RFC 5869, Section ++// 3.3. Most common scenarios will want to use New instead. ++func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { ++ expander := hmac.New(hash, pseudorandomKey) ++ return &hkdf{expander, expander.Size(), info, 1, nil, nil} ++} ++ ++// New returns a Reader, from which keys can be read, using the given hash, ++// secret, salt and context info. Salt and info can be nil. ++func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { ++ prk := Extract(hash, secret, salt) ++ return Expand(hash, prk, info) ++} +diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go +index 16c6c6b90ce..e61587945b0 100644 +--- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go ++++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build appengine +-// +build appengine + + // This file applies to App Engine first generation runtimes (<= Go 1.9). + +diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +index a7e27b3d299..9c79aa0a0cc 100644 +--- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go ++++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build !appengine +-// +build !appengine + + // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. 
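Note (illustration, not part of the vendored patch): the hkdf package vendored above is consumed through an io.Reader. A short derivation sketch using its New/Extract/Expand entry points; the secret, salt, and info strings are placeholders:

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material")
	salt := make([]byte, sha256.Size)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// One-shot derivation: hash, secret, salt and context info.
	r := hkdf.New(sha256.New, secret, salt, []byte("app: encryption key"))
	key := make([]byte, 32)
	if _, err := io.ReadFull(r, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte key\n", len(key))

	// Reusing a single extracted PRK with different info values.
	prk := hkdf.Extract(sha256.New, secret, salt)
	macKey := make([]byte, 32)
	if _, err := io.ReadFull(hkdf.Expand(sha256.New, prk, []byte("app: mac key")), macKey); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte mac key\n", len(macKey))
}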
+ +diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go +index b3e8783cc59..2cf71f0f93f 100644 +--- a/vendor/golang.org/x/oauth2/google/default.go ++++ b/vendor/golang.org/x/oauth2/google/default.go +@@ -8,7 +8,6 @@ import ( + "context" + "encoding/json" + "fmt" +- "io/ioutil" + "net/http" + "os" + "path/filepath" +@@ -142,10 +141,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar + + // Second, try a well-known file. + filename := wellKnownFile() +- if creds, err := readCredentialsFile(ctx, filename, params); err == nil { +- return creds, nil +- } else if !os.IsNotExist(err) { +- return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) ++ if b, err := os.ReadFile(filename); err == nil { ++ return CredentialsFromJSONWithParams(ctx, b, params) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) +@@ -231,7 +228,7 @@ func wellKnownFile() string { + } + + func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { +- b, err := ioutil.ReadFile(filename) ++ b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } +diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go +index e1755d1d9ac..d28140f789e 100644 +--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go ++++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go +@@ -3,7 +3,6 @@ + // license that can be found in the LICENSE file. + + //go:build appengine +-// +build appengine + + package internal + +diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go +index c0ab196cf46..14989beaf49 100644 +--- a/vendor/golang.org/x/oauth2/internal/oauth2.go ++++ b/vendor/golang.org/x/oauth2/internal/oauth2.go +@@ -14,7 +14,7 @@ import ( + + // ParseKey converts the binary contents of a private key file + // to an *rsa.PrivateKey. It detects whether the private key is in a +-// PEM container or not. If so, it extracts the the private key ++// PEM container or not. If so, it extracts the private key + // from PEM container before conversion. It only supports PEM + // containers with no passphrase. + func ParseKey(key []byte) (*rsa.PrivateKey, error) { +diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go +index b4723fcacea..58901bda53e 100644 +--- a/vendor/golang.org/x/oauth2/internal/token.go ++++ b/vendor/golang.org/x/oauth2/internal/token.go +@@ -55,12 +55,18 @@ type Token struct { + } + + // tokenJSON is the struct representing the HTTP response from OAuth2 +-// providers returning a token in JSON form. ++// providers returning a token or error in JSON form. 
++// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 + type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number ++ // error fields ++ // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 ++ ErrorCode string `json:"error"` ++ ErrorDescription string `json:"error_description"` ++ ErrorURI string `json:"error_uri"` + } + + func (e *tokenJSON) expiry() (t time.Time) { +@@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } +- if code := r.StatusCode; code < 200 || code > 299 { +- return nil, &RetrieveError{ +- Response: r, +- Body: body, +- } ++ ++ failureStatus := r.StatusCode < 200 || r.StatusCode > 299 ++ retrieveError := &RetrieveError{ ++ Response: r, ++ Body: body, ++ // attempt to populate error detail below + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": ++ // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { +- return nil, err ++ if failureStatus { ++ return nil, retrieveError ++ } ++ return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) + } ++ retrieveError.ErrorCode = vals.Get("error") ++ retrieveError.ErrorDescription = vals.Get("error_description") ++ retrieveError.ErrorURI = vals.Get("error_uri") + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), +@@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { +- return nil, err ++ if failureStatus { ++ return nil, retrieveError ++ } ++ return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) + } ++ retrieveError.ErrorCode = tj.ErrorCode ++ retrieveError.ErrorDescription = tj.ErrorDescription ++ retrieveError.ErrorURI = tj.ErrorURI + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, +@@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } ++ // according to spec, servers should respond status 400 in error case ++ // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 ++ // but some unorthodox servers respond 200 in error case ++ if failureStatus || retrieveError.ErrorCode != "" { ++ return nil, retrieveError ++ } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil + } + ++// mirrors oauth2.RetrieveError + type RetrieveError struct { +- Response *http.Response +- Body []byte ++ Response *http.Response ++ Body []byte ++ ErrorCode string ++ ErrorDescription string ++ ErrorURI string + } + + func (r *RetrieveError) Error() string { ++ if r.ErrorCode != "" { ++ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) ++ if r.ErrorDescription != "" { ++ s += fmt.Sprintf(" %q", r.ErrorDescription) ++ } ++ if r.ErrorURI != "" { ++ s += fmt.Sprintf(" %q", r.ErrorURI) ++ } ++ return s ++ } + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) + } +diff --git a/vendor/golang.org/x/oauth2/token.go 
b/vendor/golang.org/x/oauth2/token.go +index 7c64006de69..5ffce9764be 100644 +--- a/vendor/golang.org/x/oauth2/token.go ++++ b/vendor/golang.org/x/oauth2/token.go +@@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) + } + + // RetrieveError is the error returned when the token endpoint returns a +-// non-2XX HTTP status code. ++// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. ++// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. + Body []byte ++ // ErrorCode is RFC 6749's 'error' parameter. ++ ErrorCode string ++ // ErrorDescription is RFC 6749's 'error_description' parameter. ++ ErrorDescription string ++ // ErrorURI is RFC 6749's 'error_uri' parameter. ++ ErrorURI string + } + + func (r *RetrieveError) Error() string { ++ if r.ErrorCode != "" { ++ s := fmt.Sprintf("oauth2: %q", r.ErrorCode) ++ if r.ErrorDescription != "" { ++ s += fmt.Sprintf(" %q", r.ErrorDescription) ++ } ++ if r.ErrorURI != "" { ++ s += fmt.Sprintf(" %q", r.ErrorURI) ++ } ++ return s ++ } + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) + } +diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +index b1361722dca..c704bad622a 100644 +--- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +@@ -5339,7 +5339,7 @@ + ], + "parameters": { + "ipAddress": { +- "description": "The ip_address could be external IPv4, or internal IPv4 within IPv6 form of virtual_network_id with internal IPv4. IPv6 is not supported yet.", ++ "description": "The VM IP address.", + "location": "query", + "type": "string" + }, +@@ -9429,6 +9429,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use instanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -12543,6 +12544,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.instances.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instance" ++ ], ++ "parameters": { ++ "instance": { ++ "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
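Note (illustration, not part of the vendored patch): with the internal/token.go and token.go changes above, RFC 6749 error details are surfaced on *oauth2.RetrieveError instead of only in the raw response body. A sketch of how a caller might inspect them after a failed exchange; the client ID, secret, token URL, and authorization code are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "example-client",
		ClientSecret: "example-secret",
		Endpoint: oauth2.Endpoint{
			TokenURL: "https://auth.example.com/token", // placeholder
		},
	}

	_, err := conf.Exchange(context.Background(), "bad-authorization-code")
	if err != nil {
		var re *oauth2.RetrieveError
		if errors.As(err, &re) {
			// ErrorCode/ErrorDescription/ErrorURI carry the RFC 6749
			// 'error', 'error_description' and 'error_uri' parameters;
			// they are empty if the server did not populate them.
			fmt.Printf("token endpoint error %q: %s (%s)\n",
				re.ErrorCode, re.ErrorDescription, re.ErrorURI)
			return
		}
		log.Fatalf("token exchange failed: %v", err)
	}
}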
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "Name of the zone scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", ++ "request": { ++ "$ref": "InstancesSetSecurityPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setServiceAccount": { + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", +@@ -16301,6 +16351,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "patch": { ++ "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ "httpMethod": "PATCH", ++ "id": "compute.networkAttachments.patch", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkAttachment" ++ ], ++ "parameters": { ++ "networkAttachment": { ++ "description": "Name of the NetworkAttachment resource to patch.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ "request": { ++ "$ref": "NetworkAttachment" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +@@ -22849,6 +22949,100 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "calculateCancellationFee": { ++ "description": "Calculate cancellation fee for the specified commitment.", ++ "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ "httpMethod": "POST", ++ "id": "compute.regionCommitments.calculateCancellationFee", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "commitment" ++ ], ++ "parameters": { ++ "commitment": { ++ "description": "Name of the commitment to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "cancel": { ++ "description": "Cancel the specified commitment.", ++ "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ "httpMethod": "POST", ++ "id": "compute.regionCommitments.cancel", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "commitment" ++ ], ++ "parameters": { ++ "commitment": { ++ "description": "Name of the commitment to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "get": { + "description": "Returns the specified commitment resource.", + "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", +@@ -25729,6 +25923,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use regionInstanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -28079,6 +28274,56 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "patchAssociation": { ++ "description": "Updates an association for the specified network firewall policy.", ++ "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ "httpMethod": "POST", ++ "id": "compute.regionNetworkFirewallPolicies.patchAssociation", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "firewallPolicy" ++ ], ++ "parameters": { ++ "firewallPolicy": { ++ "description": "Name of the firewall policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ "request": { ++ "$ref": "FirewallPolicyAssociation" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "patchRule": { + "description": "Patches a rule of the specified priority.", + "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchRule", +@@ -32495,6 +32740,53 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "getNatIpInfo": { ++ "description": "Retrieves runtime NAT IP information.", ++ "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ "httpMethod": "GET", ++ "id": "compute.routers.getNatIpInfo", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "router" ++ ], ++ "parameters": { ++ "natName": { ++ "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. 
Name should conform to RFC1035.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "router": { ++ "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ "response": { ++ "$ref": "NatIpInfoResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, + "getNatMappingInfo": { + "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", +@@ -34034,11 +34326,6 @@ + "required": true, + "type": "string" + }, +- "reconcileConnections": { +- "description": "This flag determines how to change the status of consumer connections, when the connection policy for the corresponding project or network is modified. If the flag is false, the default case, then existing ACCEPTED and REJECTED consumer connections stay in that state. For example, even if the project is removed from the accept list, existing ACCEPTED connections will stay the same. If the flag is true, then the connection can change from ACCEPTED or REJECTED to pending when the connection policy is modified. For example, if a project is removed from the reject list, its existing REJECTED connections will move to the PENDING state. If the project is also added to the accept list, then those connections will move to the ACCEPTED state.", +- "location": "query", +- "type": "boolean" +- }, + "region": { + "description": "The region scoping this request and should conform to RFC1035.", + "location": "path", +@@ -35106,13 +35393,13 @@ + } + } + }, +- "subnetworks": { ++ "storagePools": { + "methods": { + "aggregatedList": { +- "description": "Retrieves an aggregated list of subnetworks.", +- "flatPath": "projects/{project}/aggregated/subnetworks", ++ "description": "Retrieves an aggregated list of storage pools.", ++ "flatPath": "projects/{project}/aggregated/storagePools", + "httpMethod": "GET", +- "id": "compute.subnetworks.aggregatedList", ++ "id": "compute.storagePools.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -35158,9 +35445,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/aggregated/subnetworks", ++ "path": "projects/{project}/aggregated/storagePools", + "response": { +- "$ref": "SubnetworkAggregatedList" ++ "$ref": "StoragePoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -35169,14 +35456,14 @@ + ] + }, + "delete": { +- "description": "Deletes the specified subnetwork.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "description": "Deletes the specified storage pool. Deleting a storagePool removes its data permanently and is irreversible. 
However, deleting a storagePool does not delete any snapshots previously made from the storagePool. You must separately delete snapshots.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "httpMethod": "DELETE", +- "id": "compute.subnetworks.delete", ++ "id": "compute.storagePools.delete", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35186,27 +35473,26 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to delete.", ++ "storagePool": { ++ "description": "Name of the storage pool to delete.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "response": { + "$ref": "Operation" + }, +@@ -35215,15 +35501,15 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "expandIpCidrRange": { +- "description": "Expands the IP CIDR range of the subnetwork to a specified value.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", +- "httpMethod": "POST", +- "id": "compute.subnetworks.expandIpCidrRange", ++ "get": { ++ "description": "Returns a specified storage pool. 
Gets a list of available storage pools by making a list() request.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "httpMethod": "GET", ++ "id": "compute.storagePools.get", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35233,29 +35519,113 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "storagePool": { ++ "description": "Name of the storage pool to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "response": { ++ "$ref": "StoragePool" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.storagePools.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a storage pool in the specified project using the data in the request.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.insert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to update.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "path": "projects/{project}/zones/{zone}/storagePools", + "request": { +- "$ref": "SubnetworksExpandIpCidrRangeRequest" ++ "$ref": "StoragePool" + }, + "response": { + "$ref": "Operation" +@@ -35265,17 +35635,39 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "get": { +- "description": "Returns the specified subnetwork.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "list": { ++ "description": "Retrieves a list of storage pools contained within the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools", + "httpMethod": "GET", +- "id": "compute.subnetworks.get", ++ "id": "compute.storagePools.list", + "parameterOrder": [ + "project", +- "region", +- "subnetwork" ++ "zone" + ], + "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -35283,48 +35675,85 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools", ++ "response": { ++ "$ref": "StoragePoolList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, +- "subnetwork": { +- "description": "Name of the Subnetwork resource to return.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "ZoneSetPolicyRequest" ++ }, + "response": { +- "$ref": "Subnetwork" ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "getIamPolicy": { +- "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", +- "httpMethod": "GET", +- "id": "compute.subnetworks.getIamPolicy", ++ "setLabels": { ++ "description": "Sets the labels on a storage pools. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.setLabels", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +- "optionsRequestedPolicyVersion": { +- "description": "Requested IAM Policy version.", +- "format": "int32", +- "location": "query", +- "type": "integer" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -35332,12 +35761,55 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The name of the region for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ "request": { ++ "$ref": "ZoneSetLabelsRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.storagePools.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", +@@ -35345,11 +35817,21 @@ + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "path": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, + "response": { +- "$ref": "Policy" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -35357,14 +35839,15 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates a subnetwork in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks", +- "httpMethod": "POST", +- "id": "compute.subnetworks.insert", ++ "update": { ++ "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: size_tb and provisioned_iops.", ++ "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ "httpMethod": "PATCH", ++ "id": "compute.storagePools.update", + "parameterOrder": [ + "project", +- "region" ++ "zone", ++ "storagePool" + ], + "parameters": { + "project": { +@@ -35374,22 +35857,35 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "storagePool": { ++ "description": "The storagePool name for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "updateMask": { ++ "description": "update_mask indicates fields to be updated as part of this request.", ++ "format": "google-fieldmask", + "location": "query", + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/subnetworks", ++ "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", + "request": { +- "$ref": "Subnetwork" ++ "$ref": "StoragePool" + }, + "response": { + "$ref": "Operation" +@@ -35398,15 +35894,310 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- }, +- "list": { +- "description": "Retrieves a list of subnetworks available to the specified project.", +- "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ } ++ } ++ }, ++ "subnetworks": { ++ "methods": { ++ "aggregatedList": { ++ "description": "Retrieves an aggregated list of subnetworks.", ++ "flatPath": "projects/{project}/aggregated/subnetworks", + "httpMethod": "GET", +- "id": "compute.subnetworks.list", ++ "id": "compute.subnetworks.aggregatedList", + "parameterOrder": [ +- "project", +- "region" ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "includeAllScopes": { ++ "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ "location": "query", ++ "type": "boolean" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/aggregated/subnetworks", ++ "response": { ++ "$ref": "SubnetworkAggregatedList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "delete": { ++ "description": "Deletes the specified subnetwork.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "httpMethod": "DELETE", ++ "id": "compute.subnetworks.delete", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "expandIpCidrRange": { ++ "description": "Expands the IP CIDR range of the subnetwork to a specified value.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "httpMethod": "POST", ++ "id": "compute.subnetworks.expandIpCidrRange", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", ++ "request": { ++ "$ref": "SubnetworksExpandIpCidrRangeRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified subnetwork.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "subnetwork" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "subnetwork": { ++ "description": "Name of the Subnetwork resource to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", ++ "response": { ++ "$ref": "Subnetwork" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a subnetwork in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ "httpMethod": "POST", ++ "id": "compute.subnetworks.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/subnetworks", ++ "request": { ++ "$ref": "Subnetwork" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves a list of subnetworks available to the specified project.", ++ "flatPath": "projects/{project}/regions/{region}/subnetworks", ++ "httpMethod": "GET", ++ "id": "compute.subnetworks.list", ++ "parameterOrder": [ ++ "project", ++ "region" + ], + "parameters": { + "filter": { +@@ -37088,6 +37879,55 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetInstances.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "targetInstance" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetInstance": { ++ "description": "Name of the TargetInstance resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "Name of the zone scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", +@@ -37695,6 +38535,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{resource}/testIamPermissions", +@@ -40632,7 +41521,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AWSV4Signature": { +@@ -41079,11 +41968,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. 
If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -41093,11 +41982,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -41139,8 +42028,7 @@ + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -41778,6 +42666,20 @@ + "" + ], + "type": "string" ++ }, ++ "workloadType": { ++ "description": "The workload type of the instances that will target this reservation.", ++ "enum": [ ++ "BATCH", ++ "SERVING", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Reserved resources will be optimized for BATCH workloads, such as ML training.", ++ "Reserved resources will be optimized for SERVING workloads, such as ML inference.", ++ "" ++ ], ++ "type": "string" + } + }, + "type": "object" +@@ -42114,6 +43016,10 @@ + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. 
For example: pd-standard.", + "type": "string" + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this disk is using confidential compute mode.", ++ "type": "boolean" ++ }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures", + "items": { +@@ -42225,6 +43131,10 @@ + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot." ++ }, ++ "storagePool": { ++ "description": "The storage pool in which the new disk is created. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /storagePools/storagePool - projects/project/zones/zone/storagePools/storagePool - zones/zone/storagePools/storagePool ", ++ "type": "string" + } + }, + "type": "object" +@@ -42872,7 +43782,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -42902,7 +43812,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -43654,6 +44564,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. 
This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -44805,10 +45722,6 @@ + "format": "int64", + "type": "string" + }, +- "instance": { +- "$ref": "Instance", +- "description": "DEPRECATED: Please use instance_properties instead." +- }, + "instanceProperties": { + "$ref": "InstanceProperties", + "description": "The instance properties defining the VM instances to be created. Required if sourceInstanceTemplate is not provided." +@@ -44844,6 +45757,10 @@ + "description": "Per-instance properties to be set on individual instances. To be extended in the future.", + "id": "BulkInsertInstanceResourcePerInstanceProperties", + "properties": { ++ "hostname": { ++ "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", ++ "type": "string" ++ }, + "name": { + "description": "This field is only temporary. It will be removed. Do not use it.", + "type": "string" +@@ -44851,6 +45768,48 @@ + }, + "type": "object" + }, ++ "BulkInsertOperationStatus": { ++ "id": "BulkInsertOperationStatus", ++ "properties": { ++ "createdVmCount": { ++ "description": "[Output Only] Count of VMs successfully created so far.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "deletedVmCount": { ++ "description": "[Output Only] Count of VMs that got deleted during rollback.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "failedToCreateVmCount": { ++ "description": "[Output Only] Count of VMs that started creating but encountered an error.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "status": { ++ "description": "[Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.", ++ "enum": [ ++ "CREATING", ++ "DONE", ++ "ROLLING_BACK", ++ "STATUS_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Rolling forward - creating VMs.", ++ "Done", ++ "Rolling back - cleaning up after an error.", ++ "" ++ ], ++ "type": "string" ++ }, ++ "targetVmCount": { ++ "description": "[Output Only] Count of VMs originally planned to be created.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "BundledLocalSsds": { + "id": "BundledLocalSsds", + "properties": { +@@ -45135,6 +46094,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "CommitmentResourceStatus", ++ "description": "[Output Only] Status information for Commitment resource." ++ }, + "resources": { + "description": "A list of commitment amounts for particular resources. 
Note that VCPU and MEMORY resource commitments must occur together.", + "items": { +@@ -45151,7 +46114,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -45162,12 +46125,16 @@ + "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", + "enum": [ + "ACTIVE", ++ "CANCELED_EARLY_TERMINATION", ++ "CANCELING", + "CANCELLED", + "CREATING", + "EXPIRED", + "NOT_YET_ACTIVE" + ], + "enumDescriptions": [ ++ "", ++ "", + "", + "Deprecate CANCELED status. Will use separate status to differentiate cancel by mergeCud or manual cancellation.", + "", +@@ -45187,6 +46154,7 @@ + "COMPUTE_OPTIMIZED", + "COMPUTE_OPTIMIZED_C2D", + "COMPUTE_OPTIMIZED_C3", ++ "COMPUTE_OPTIMIZED_C3D", + "GENERAL_PURPOSE", + "GENERAL_PURPOSE_E2", + "GENERAL_PURPOSE_N2", +@@ -45210,6 +46178,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -45464,6 +46433,43 @@ + }, + "type": "object" + }, ++ "CommitmentResourceStatus": { ++ "description": "[Output Only] Contains output only fields.", ++ "id": "CommitmentResourceStatus", ++ "properties": { ++ "cancellationInformation": { ++ "$ref": "CommitmentResourceStatusCancellationInformation", ++ "description": "[Output Only] An optional, contains all the needed information of cancellation." ++ } ++ }, ++ "type": "object" ++ }, ++ "CommitmentResourceStatusCancellationInformation": { ++ "id": "CommitmentResourceStatusCancellationInformation", ++ "properties": { ++ "canceledCommitment": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional amount of CUDs canceled so far in the last 365 days." ++ }, ++ "canceledCommitmentLastUpdatedTimestamp": { ++ "description": "[Output Only] An optional last update time of canceled_commitment. RFC3339 text format.", ++ "type": "string" ++ }, ++ "cancellationCap": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional,the cancellation cap for how much commitments can be canceled in a rolling 365 per billing account." ++ }, ++ "cancellationFee": { ++ "$ref": "Money", ++ "description": "[Output Only] An optional, cancellation fee." ++ }, ++ "cancellationFeeExpirationTimestamp": { ++ "description": "[Output Only] An optional, cancellation fee expiration time. RFC3339 text format.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "CommitmentsScopedList": { + "id": "CommitmentsScopedList", + "properties": { +@@ -45655,12 +46661,14 @@ + "enum": [ + "CONFIDENTIAL_INSTANCE_TYPE_UNSPECIFIED", + "SEV", +- "SEV_SNP" ++ "SEV_SNP", ++ "TDX" + ], + "enumDescriptions": [ + "No type specified. Do not use this value.", + "AMD Secure Encrypted Virtualization.", +- "AMD Secure Encrypted Virtualization - Secure Nested Paging." ++ "AMD Secure Encrypted Virtualization - Secure Nested Paging.", ++ "Intel Trust Domain eXtension." + ], + "type": "string" + }, +@@ -45942,6 +46950,10 @@ + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the disk using a customer-supplied encryption key or a customer-managed encryption key. Encryption keys do not protect access to metadata of the disk. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later. 
For example, to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine. After you encrypt a disk with a customer-managed key, the diskEncryptionKey.kmsKeyName is set to a key *version* name once the disk is created. The disk is encrypted with this version of the key. In the response, diskEncryptionKey.kmsKeyName appears in the following format: \"diskEncryptionKey.kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeysVersions/version If you do not provide an encryption key when creating the disk, then the disk is encrypted using an automatically generated key and you don't need to provide a key to use the disk later." + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this disk is using confidential compute mode.", ++ "type": "boolean" ++ }, + "eraseWindowsVssSignature": { + "description": "Specifies whether the disk restored from a source snapshot should erase Windows specific VSS signature.", + "type": "boolean" +@@ -46166,6 +47178,10 @@ + ], + "type": "string" + }, ++ "storagePool": { ++ "description": "The storage pool in which the new disk is created. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /storagePools/storagePool - projects/project/zones/zone/storagePools/storagePool - zones/zone/storagePools/storagePool ", ++ "type": "string" ++ }, + "storageType": { + "description": "[Deprecated] Storage type of the persistent disk.", + "enum": [ +@@ -46333,6 +47349,14 @@ + "DiskAsyncReplication": { + "id": "DiskAsyncReplication", + "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, + "disk": { + "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "type": "string" +@@ -46554,6 +47578,11 @@ + }, + "description": "Key: disk, value: AsyncReplicationStatus message", + "type": "object" ++ }, ++ "usedBytes": { ++ "description": "[Output Only] Space used by data stored in the disk (in bytes). Note that this field is set only when the disk is in a storage pool.", ++ "format": "int64", ++ "type": "string" + } + }, + "type": "object" +@@ -48105,6 +49134,11 @@ + "description": "The name for an association.", + "type": "string" + }, ++ "priority": { ++ "description": "An integer indicating the priority of an association. The priority must be a positive value between 1 and 2147483647. Firewall Policies are evaluated from highest to lowest priority where 1 is the highest priority and 2147483647 is the lowest priority. The default value is `1000`. 
If two associations have the same priority then lexicographical order on association names is applied.", ++ "format": "int32", ++ "type": "integer" ++ }, + "shortName": { + "description": "[Output Only] The short name of the firewall policy of the association.", + "type": "string" +@@ -48605,7 +49639,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -49191,6 +50225,22 @@ + "description": "[Output only] Represents status related to the future reservation.", + "id": "FutureReservationStatus", + "properties": { ++ "amendmentStatus": { ++ "description": "The current status of the requested amendment.", ++ "enum": [ ++ "AMENDMENT_APPROVED", ++ "AMENDMENT_DECLINED", ++ "AMENDMENT_IN_REVIEW", ++ "AMENDMENT_STATUS_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "The requested amendment to the Future Resevation has been approved and applied by GCP.", ++ "The requested amendment to the Future Reservation has been declined by GCP and the original state was restored.", ++ "The requested amendment to the Future Reservation is currently being reviewd by GCP.", ++ "" ++ ], ++ "type": "string" ++ }, + "autoCreatedReservations": { + "description": "Fully qualified urls of the automatically created reservations at start_time.", + "items": { +@@ -49203,6 +50253,10 @@ + "format": "int64", + "type": "string" + }, ++ "lastKnownGoodState": { ++ "$ref": "FutureReservationStatusLastKnownGoodState", ++ "description": "This field represents the future reservation before an amendment was requested. If the amendment is declined, the Future Reservation will be reverted to the last known good state. The last known good state is not set when updating a future reservation whose Procurement Status is DRAFTING." ++ }, + "lockTime": { + "description": "Time when Future Reservation would become LOCKED, after which no modifications to Future Reservation will be allowed. Applicable only after the Future Reservation is in the APPROVED state. The lock_time is an RFC3339 string. The procurement_status will transition to PROCURING state at this time.", + "type": "string" +@@ -49218,6 +50272,7 @@ + "FAILED", + "FAILED_PARTIALLY_FULFILLED", + "FULFILLED", ++ "PENDING_AMENDMENT_APPROVAL", + "PENDING_APPROVAL", + "PROCUREMENT_STATUS_UNSPECIFIED", + "PROCURING", +@@ -49232,6 +50287,7 @@ + "Future reservation failed. No additional reservations were provided.", + "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.", + "Future reservation is fulfilled completely.", ++ "An Amendment to the Future Reservation has been requested. 
If the Amendment is declined, the Future Reservation will be restored to the last known good state.", + "Future reservation is pending approval by GCP.", + "", + "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.", +@@ -49245,6 +50301,77 @@ + }, + "type": "object" + }, ++ "FutureReservationStatusLastKnownGoodState": { ++ "description": "The state that the future reservation will be reverted to should the amendment be declined.", ++ "id": "FutureReservationStatusLastKnownGoodState", ++ "properties": { ++ "description": { ++ "description": "The description of the FutureReservation before an amendment was requested.", ++ "type": "string" ++ }, ++ "futureReservationSpecs": { ++ "$ref": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs" ++ }, ++ "namePrefix": { ++ "description": "The name prefix of the Future Reservation before an amendment was requested.", ++ "type": "string" ++ }, ++ "procurementStatus": { ++ "description": "The status of the last known good state for the Future Reservation.", ++ "enum": [ ++ "APPROVED", ++ "CANCELLED", ++ "COMMITTED", ++ "DECLINED", ++ "DRAFTING", ++ "FAILED", ++ "FAILED_PARTIALLY_FULFILLED", ++ "FULFILLED", ++ "PENDING_AMENDMENT_APPROVAL", ++ "PENDING_APPROVAL", ++ "PROCUREMENT_STATUS_UNSPECIFIED", ++ "PROCURING", ++ "PROVISIONING" ++ ], ++ "enumDescriptions": [ ++ "Future reservation is approved by GCP.", ++ "Future reservation is cancelled by the customer.", ++ "Future reservation is committed by the customer.", ++ "Future reservation is rejected by GCP.", ++ "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.", ++ "Future reservation failed. No additional reservations were provided.", ++ "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.", ++ "Future reservation is fulfilled completely.", ++ "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.", ++ "Future reservation is pending approval by GCP.", ++ "", ++ "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.", ++ "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs": { ++ "description": "The properties of the last known good state for the Future Reservation.", ++ "id": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs", ++ "properties": { ++ "shareSettings": { ++ "$ref": "ShareSettings", ++ "description": "The previous share settings of the Future Reservation." ++ }, ++ "specificSkuProperties": { ++ "$ref": "FutureReservationSpecificSKUProperties", ++ "description": "The previous instance related properties of the Future Reservation." ++ }, ++ "timeWindow": { ++ "$ref": "FutureReservationTimeWindow", ++ "description": "The previous time window of the Future Reservation." 
++ } ++ }, ++ "type": "object" ++ }, + "FutureReservationStatusSpecificSKUProperties": { + "description": "Properties to be set for the Future Reservation.", + "id": "FutureReservationStatusSpecificSKUProperties", +@@ -49867,7 +50994,7 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "BARE_METAL_LINUX_COMPATIBLE", + "FEATURE_TYPE_UNSPECIFIED", +@@ -49877,6 +51004,7 @@ + "SEV_CAPABLE", + "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", ++ "TDX_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", + "WINDOWS" +@@ -49892,6 +51020,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -50101,7 +51230,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/alpha/healthChecks) * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/alpha/healthChecks) * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). 
For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -51058,7 +52187,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -52656,7 +53785,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -53241,6 +54370,10 @@ + "format": "uint64", + "type": "string" + }, ++ "instanceFlexibilityPolicy": { ++ "$ref": "InstanceGroupManagerInstanceFlexibilityPolicy", ++ "description": "Instance flexibility allowing MIG to create VMs from multiple types of machines. Instance flexibility configuration on MIG overrides instance template configuration." ++ }, + "instanceGroup": { + "description": "[Output Only] The URL of the Instance Group resource.", + "type": "string" +@@ -53334,6 +54467,18 @@ + "format": "int32", + "type": "integer" + }, ++ "targetSizeUnit": { ++ "description": "The unit of measure for the target size.", ++ "enum": [ ++ "VCPU", ++ "VM" ++ ], ++ "enumDescriptions": [ ++ "TargetSize is the target count of vCPUs of VMs.", ++ "[Default] TargetSize is the target number of VMs." ++ ], ++ "type": "string" ++ }, + "targetStoppedSize": { + "description": "The target number of stopped instances for this managed instance group. This number changes when you: - Stop instance using the stopInstances method or start instances using the startInstances method. - Manually change the targetStoppedSize using the update method. ", + "format": "int32", +@@ -53592,7 +54737,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. 
The default value is 0.", + "format": "int32", + "type": "integer" + }, +@@ -53621,6 +54766,37 @@ + }, + "type": "object" + }, ++ "InstanceGroupManagerInstanceFlexibilityPolicy": { ++ "id": "InstanceGroupManagerInstanceFlexibilityPolicy", ++ "properties": { ++ "instanceSelectionLists": { ++ "additionalProperties": { ++ "$ref": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection" ++ }, ++ "description": "List of instance selection options that the group will use when creating new VMs.", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection": { ++ "id": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection", ++ "properties": { ++ "machineTypes": { ++ "description": "Full machine-type names, e.g. \"n1-standard-16\".", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "rank": { ++ "description": "Preference of this instance selection. Lower number means higher preference. MIG will first try to create a VM based on the machine-type with lowest rank and fallback to next rank based on availability. Machine types and instance selections with the same rank have the same preference.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InstanceGroupManagerInstanceLifecyclePolicy": { + "id": "InstanceGroupManagerInstanceLifecyclePolicy", + "properties": { +@@ -54340,7 +55516,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -56058,7 +57234,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -56087,6 +57263,19 @@ + }, + "type": "object" + }, ++ "InstancesBulkInsertOperationMetadata": { ++ "id": "InstancesBulkInsertOperationMetadata", ++ "properties": { ++ "perLocationStatus": { ++ "additionalProperties": { ++ "$ref": "BulkInsertOperationStatus" ++ }, ++ "description": "Status information per location (location name is key). Example key: zones/us-central1-a", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, + "InstancesGetEffectiveFirewallsResponse": { + "id": "InstancesGetEffectiveFirewallsResponse", + "properties": { +@@ -56370,6 +57559,23 @@ + }, + "type": "object" + }, ++ "InstancesSetSecurityPolicyRequest": { ++ "id": "InstancesSetSecurityPolicyRequest", ++ "properties": { ++ "networkInterfaces": { ++ "description": "The network interfaces that the security policy will be applied to. Network interfaces use the nicN naming format. 
You can only set a security policy for network interfaces with an access config.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "securityPolicy": { ++ "description": "A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "InstancesSetServiceAccountRequest": { + "id": "InstancesSetServiceAccountRequest", + "properties": { +@@ -56941,13 +58147,26 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.", + "type": "boolean" + }, ++ "availableFeatures": { ++ "description": "[Output only] List of features available for this interconnect, which can take one of the following values: - MACSEC If present then the interconnect was created on MACsec capable hardware ports. If not present then the interconnect is provisioned on non-MACsec capable ports and MACsec enablement will fail.", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "circuitInfos": { + "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { +@@ -57088,6 +58307,19 @@ + "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", + "type": "string" + }, ++ "requestedFeatures": { ++ "description": "Optional. List of features requested for this interconnect, which can take one of the following values: - MACSEC If specified then the interconnect will be created on MACsec capable hardware ports. If not specified, the default value is false, which will allocate non-MACsec capable ports first if available. This parameter can only be provided during interconnect INSERT and cannot be changed using interconnect PATCH. Please review Interconnect Pricing for implications on enabling this flag.", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -57255,7 +58487,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. 
When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -58156,6 +59388,34 @@ + "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", + "type": "string" + }, ++ "availableFeatures": { ++ "description": "[Output only] List of features available at this interconnect location, which can take one of the following values: - MACSEC ", ++ "items": { ++ "enum": [ ++ "IF_MACSEC" ++ ], ++ "enumDescriptions": [ ++ "Media Access Control security (MACsec)" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "availableLinkTypes": { ++ "description": "[Output only] List of link types available at this interconnect location, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR ", ++ "items": { ++ "enum": [ ++ "LINK_TYPE_ETHERNET_100G_LR", ++ "LINK_TYPE_ETHERNET_10G_LR" ++ ], ++ "enumDescriptions": [ ++ "100G Ethernet, LR Optics.", ++ "10G Ethernet, LR Optics. [(rate_bps) = 10000000000];" ++ ], ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "city": { + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" +@@ -58557,7 +59817,7 @@ + "type": "object" + }, + "InterconnectRemoteLocation": { +- "description": "Represents an Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. 
You can use this resource to find remote location details about an Interconnect attachment (VLAN).", + "id": "InterconnectRemoteLocation", + "properties": { + "address": { +@@ -58628,7 +59888,7 @@ + ], + "enumDescriptions": [ + "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", +- "LACP_UNSUPPORTED: LACP is not supported and will not be enabled on this port. GetDiagnostics will show bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." + ], + "type": "string" + }, +@@ -58688,7 +59948,7 @@ + "id": "InterconnectRemoteLocationConstraints", + "properties": { + "portPairRemoteLocation": { +- "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI will require that both ports use the same remote location.", ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", + "enum": [ + "PORT_PAIR_MATCHING_REMOTE_LOCATION", + "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" +@@ -58750,224 +60010,224 @@ + "kind": { + "default": "compute#interconnectRemoteLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", +- "type": "string" +- }, +- "nextPageToken": { +- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", +- "type": "string" +- }, +- "selfLink": { +- "description": "[Output Only] Server-defined URL for this resource.", +- "type": "string" +- }, +- "warning": { +- "description": "[Output Only] Informational warning message.", +- "properties": { +- "code": { +- "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +- "enum": [ +- "CLEANUP_FAILED", +- "DEPRECATED_RESOURCE_USED", +- "DEPRECATED_TYPE_USED", +- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +- "EXPERIMENTAL_TYPE_USED", +- "EXTERNAL_API_WARNING", +- "FIELD_VALUE_OVERRIDEN", +- "INJECTED_KERNELS_DEPRECATED", +- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +- "LARGE_DEPLOYMENT_WARNING", +- "MISSING_TYPE_DEPENDENCY", +- "NEXT_HOP_ADDRESS_NOT_ASSIGNED", +- "NEXT_HOP_CANNOT_IP_FORWARD", +- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +- "NEXT_HOP_INSTANCE_NOT_FOUND", +- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +- "NEXT_HOP_NOT_RUNNING", +- "NOT_CRITICAL_ERROR", +- "NO_RESULTS_ON_PAGE", +- "PARTIAL_SUCCESS", +- "REQUIRED_TOS_AGREEMENT", +- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +- "RESOURCE_NOT_DELETED", +- "SCHEMA_VALIDATION_IGNORED", +- "SINGLE_INSTANCE_PROPERTY_TEMPLATE", +- "UNDECLARED_PROPERTIES", +- "UNREACHABLE" +- ], +- "enumDescriptions": [ +- "Warning about failed cleanup of transient changes made by a failed operation.", +- "A link to a deprecated resource was created.", +- "When deploying and at least one of the resources has a type marked as deprecated", +- "The user created a boot disk that is larger than image size.", +- "When deploying and at least one of the resources has a type marked as experimental", +- "Warning that is present in an external api call", +- "Warning that value of a field has been overridden. Deprecated unused field.", +- "The operation involved use of an injected kernel, which is deprecated.", +- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", +- "When deploying a deployment with a exceedingly large number of resources", +- "A resource depends on a missing type", +- "The route's nextHopIp address is not assigned to an instance on the network.", +- "The route's next hop instance cannot ip forward.", +- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", +- "The route's nextHopInstance URL refers to an instance that does not exist.", +- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", +- "The route's next hop instance does not have a status of RUNNING.", +- "Error which is not critical. We decided to continue the process despite the mentioned error.", +- "No results are present on a particular list page.", +- "Success is reported, but some results may be missing due to errors", +- "The user attempted to use a resource that requires a TOS they have not accepted.", +- "Warning that a resource is in use.", +- "One or more of the resources set to auto-delete could not be deleted because they were in use.", +- "When a resource schema validation is ignored.", +- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", +- "When undeclared properties in the schema are present", +- "A given scope cannot be reached." +- ], +- "type": "string" +- }, +- "data": { +- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", +- "items": { +- "properties": { +- "key": { +- "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", +- "type": "string" +- }, +- "value": { +- "description": "[Output Only] A warning data value corresponding to the key.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "[Output Only] A human-readable description of the warning code.", +- "type": "string" +- } +- }, +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "InterconnectRemoteLocationPermittedConnections": { +- "id": "InterconnectRemoteLocationPermittedConnections", +- "properties": { +- "interconnectLocation": { +- "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InterconnectsGetDiagnosticsResponse": { +- "description": "Response for the InterconnectsGetDiagnosticsRequest.", +- "id": "InterconnectsGetDiagnosticsResponse", +- "properties": { +- "result": { +- "$ref": "InterconnectDiagnostics" +- } +- }, +- "type": "object" +- }, +- "InterconnectsGetMacsecConfigResponse": { +- "description": "Response for the InterconnectsGetMacsecConfigRequest.", +- "id": "InterconnectsGetMacsecConfigResponse", +- "properties": { +- "etag": { +- "description": "end_interface: MixerGetResponseWithEtagBuilder", +- "type": "string" +- }, +- "result": { +- "$ref": "InterconnectMacsecConfig" +- } +- }, +- "type": "object" +- }, +- "InternalIpAddress": { +- "id": "InternalIpAddress", +- "properties": { +- "cidr": { +- "description": "IP CIDR address or range.", +- "type": "string" +- }, +- "owner": { +- "description": "The owner of the internal IP address.", +- "type": "string" +- }, +- "purpose": { +- "description": "The purpose of the internal IP address if applicable.", +- "type": "string" +- }, +- "region": { +- "description": "The region of the internal IP address if applicable.", +- "type": "string" +- }, +- "type": { +- "description": "The type of the internal IP address.", +- "enum": [ +- "PEER_RESERVED", +- "PEER_USED", +- "REMOTE_RESERVED", +- "REMOTE_USED", +- "RESERVED", +- "SUBNETWORK", +- "TYPE_UNSPECIFIED" +- ], +- "enumDescriptions": [ +- "Reserved IP ranges on peer networks.", +- "Used IP ranges on peer networks, including peer subnetwork IP ranges.", +- "Reserved IP ranges on peer networks of peer networks.", +- "Used IP ranges on peer networks of peer networks.", +- "Reserved IP ranges on local network.", +- "Subnetwork IP ranges on local network.", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InternalIpOwner": { +- "id": "InternalIpOwner", +- "properties": { +- "ipCidrRange": { +- "description": "IP CIDR range being owned.", +- "type": "string" +- }, +- "owners": { +- "description": "URLs of the IP owners of the IP CIDR range.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "systemOwned": { +- "description": "Whether this IP CIDR range is reserved for system use.", +- "type": "boolean" +- } +- }, +- "type": "object" +- }, +- "IpAddressesList": { +- "id": "IpAddressesList", +- "properties": { +- "id": { +- "description": "[Output Only] Unique identifier for the resource; 
defined by the server.", +- "type": "string" +- }, +- "items": { +- "description": "A list of InternalIpAddress resources.", +- "items": { +- "$ref": "InternalIpAddress" +- }, +- "type": "array" +- }, +- "kind": { +- "default": "compute#ipAddressesList", +- "description": "[Output Only] Type of resource. Always compute#ipAddressesList for IP addresses lists.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", ++ "properties": { ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectsGetDiagnosticsResponse": { ++ "description": "Response for the InterconnectsGetDiagnosticsRequest.", ++ "id": "InterconnectsGetDiagnosticsResponse", ++ "properties": { ++ "result": { ++ "$ref": "InterconnectDiagnostics" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectsGetMacsecConfigResponse": { ++ "description": "Response for the InterconnectsGetMacsecConfigRequest.", ++ "id": "InterconnectsGetMacsecConfigResponse", ++ "properties": { ++ "etag": { ++ "description": "end_interface: MixerGetResponseWithEtagBuilder", ++ "type": "string" ++ }, ++ "result": { ++ "$ref": "InterconnectMacsecConfig" ++ } ++ }, ++ "type": "object" ++ }, ++ "InternalIpAddress": { ++ "id": "InternalIpAddress", ++ "properties": { ++ "cidr": { ++ "description": "IP CIDR address or range.", ++ "type": "string" ++ }, ++ "owner": { ++ "description": "The owner of the internal IP address.", ++ "type": "string" ++ }, ++ "purpose": { ++ "description": "The purpose of the internal IP address if applicable.", ++ "type": "string" ++ }, ++ "region": { ++ "description": "The region of the internal IP address if applicable.", ++ "type": "string" ++ }, ++ "type": { ++ "description": "The type of the internal IP address.", ++ "enum": [ ++ "PEER_RESERVED", ++ "PEER_USED", ++ "REMOTE_RESERVED", ++ 
"REMOTE_USED", ++ "RESERVED", ++ "SUBNETWORK", ++ "TYPE_UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "Reserved IP ranges on peer networks.", ++ "Used IP ranges on peer networks, including peer subnetwork IP ranges.", ++ "Reserved IP ranges on peer networks of peer networks.", ++ "Used IP ranges on peer networks of peer networks.", ++ "Reserved IP ranges on local network.", ++ "Subnetwork IP ranges on local network.", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InternalIpOwner": { ++ "id": "InternalIpOwner", ++ "properties": { ++ "ipCidrRange": { ++ "description": "IP CIDR range being owned.", ++ "type": "string" ++ }, ++ "owners": { ++ "description": "URLs of the IP owners of the IP CIDR range.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "systemOwned": { ++ "description": "Whether this IP CIDR range is reserved for system use.", ++ "type": "boolean" ++ } ++ }, ++ "type": "object" ++ }, ++ "IpAddressesList": { ++ "id": "IpAddressesList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InternalIpAddress resources.", ++ "items": { ++ "$ref": "InternalIpAddress" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#ipAddressesList", ++ "description": "[Output Only] Type of resource. Always compute#ipAddressesList for IP addresses lists.", + "type": "string" + }, + "nextPageToken": { +@@ -59983,7 +61243,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -60501,7 +61761,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -60787,6 +62047,27 @@ + }, + "type": "object" + }, ++ "Money": { ++ "description": "Represents an amount of money with its currency type.", ++ "id": "Money", ++ "properties": { ++ "currencyCode": { ++ "description": "The three-letter currency code defined in ISO 4217.", ++ "type": "string" ++ }, ++ "nanos": { ++ "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "units": { ++ "description": "The whole units of the amount. For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", ++ "format": "int64", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "MutualTls": { + "description": "[Deprecated] Configuration for the mutual Tls mode for peer authentication. 
Configuration for the mutual Tls mode for peer authentication.", + "id": "MutualTls", +@@ -60824,6 +62105,72 @@ + }, + "type": "object" + }, ++ "NatIpInfo": { ++ "description": "Contains NAT IP information of a NAT config (i.e. usage status, mode).", ++ "id": "NatIpInfo", ++ "properties": { ++ "natIpInfoMappings": { ++ "description": "A list of all NAT IPs assigned to this NAT config.", ++ "items": { ++ "$ref": "NatIpInfoNatIpInfoMapping" ++ }, ++ "type": "array" ++ }, ++ "natName": { ++ "description": "Name of the NAT config which the NAT IP belongs to.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "NatIpInfoNatIpInfoMapping": { ++ "description": "Contains information of a NAT IP.", ++ "id": "NatIpInfoNatIpInfoMapping", ++ "properties": { ++ "mode": { ++ "description": "Specifies whether NAT IP is auto or manual.", ++ "enum": [ ++ "AUTO", ++ "MANUAL" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "natIp": { ++ "description": "NAT IP address. For example: 203.0.113.11.", ++ "type": "string" ++ }, ++ "usage": { ++ "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", ++ "enum": [ ++ "IN_USE", ++ "UNUSED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "NatIpInfoResponse": { ++ "id": "NatIpInfoResponse", ++ "properties": { ++ "result": { ++ "description": "[Output Only] A list of NAT IP information.", ++ "items": { ++ "$ref": "NatIpInfo" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "Network": { + "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Network", +@@ -60854,7 +62201,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -60965,7 +62312,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -60990,7 +62337,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -61154,7 +62501,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. 
This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -61162,7 +62509,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -62636,7 +63983,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -65263,6 +66610,9 @@ + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", + "type": "string" + }, ++ "instancesBulkInsertOperationMetadata": { ++ "$ref": "InstancesBulkInsertOperationMetadata" ++ }, + "kind": { + "default": "compute#operation", + "description": "[Output Only] Type of the resource. Always `compute#operation` for Operation resources.", +@@ -68475,11 +69825,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -68494,6 +69848,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -68521,6 +69876,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -68678,6 +70034,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -71672,9 +73034,6 @@ + }, + "description": "[Output Only] Represents the status of the service integration specs defined by the user in instance.serviceIntegrationSpecs.", + "type": "object" +- }, +- "upcomingMaintenance": { +- "$ref": "ResourceStatusUpcomingMaintenance" + } + }, + "type": "object" +@@ -71733,16 +73092,6 @@ + }, + "type": "object" + }, +- "ResourceStatusUpcomingMaintenance": { +- "id": "ResourceStatusUpcomingMaintenance", +- "properties": { +- "canReschedule": { +- "description": "Indicates if the maintenance can be customer triggered. See go/sf-ctm-design for more details", +- "type": "boolean" +- } +- }, +- "type": "object" +- }, + "RolloutPolicy": { + "description": "A rollout policy configuration.", + "id": "RolloutPolicy", +@@ -72514,14 +73863,14 @@ + "description": "BFD configuration for the BGP peering." 
+ }, + "customLearnedIpRanges": { +- "description": "User-defined Custom Learned Route IP range list for a BGP session.", ++ "description": "A list of user-defined custom learned route IP address ranges for a BGP session.", + "items": { + "$ref": "RouterBgpPeerCustomLearnedIpRange" + }, + "type": "array" + }, + "customLearnedRoutePriority": { +- "description": "User-defined Custom Learned Route Priority for a BGP session. This will be applied to all Custom Learned Route ranges of the BGP session, if not given, google-managed priority of 100 is used.", ++ "description": "The user-defined custom learned route priority for a BGP session. This value is applied to all custom learned route ranges for the session. You can choose a value from `0` to `65335`. If you don't provide a value, Google Cloud assigns a priority of `100` to the ranges.", + "format": "int32", + "type": "integer" + }, +@@ -72674,7 +74023,7 @@ + "id": "RouterBgpPeerCustomLearnedIpRange", + "properties": { + "range": { +- "description": "The Custom Learned Route IP range. Must be a valid CIDR-formatted prefix. If an IP is provided without a subnet mask, it is interpreted as a /32 singular IP range for IPv4, and /128 for IPv6.", ++ "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, a `/32` singular IP address range, and, for IPv6, `/128`.", + "type": "string" + } + }, +@@ -72982,7 +74331,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -74415,15 +75764,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -74447,9 +75796,11 @@ + "jsonParsing": { + "enum": [ + "DISABLED", +- "STANDARD" ++ "STANDARD", ++ "STANDARD_WITH_GRAPHQL" + ], + "enumDescriptions": [ ++ "", + "", + "" + ], +@@ -74465,6 +75816,13 @@ + "" + ], + "type": "string" ++ }, ++ "userIpRequestHeaders": { ++ "description": "An optional list of case-insensitive request header names to use for resolving the callers client IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -74663,7 +76021,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -74683,7 +76041,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. 
This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -74708,7 +76066,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -74742,7 +76100,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "redirectTarget": { + "description": "This must be specified for redirect actions. Cannot be specified for any other actions.", +@@ -74824,7 +76182,11 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." ++ }, ++ "exprOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptions", ++ "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -74909,6 +76271,36 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleMatcherExprOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptions", ++ "properties": { ++ "recaptchaOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "properties": { ++ "actionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "sessionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRuleNetworkMatcher": { + "description": "Represents a match condition that incoming network traffic is evaluated against.", + "id": "SecurityPolicyRuleNetworkMatcher", +@@ -75130,7 +76522,7 @@ + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedActionRpcStatus": { +@@ -75139,7 +76531,7 @@ + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -75299,7 +76691,7 @@ + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "clientTlsSettings": { +@@ -75307,7 +76699,7 @@ + "description": "[Deprecated] TLS Settings for the backend service." + }, + "subjectAltNames": { +- "description": "Optional. 
A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -75416,7 +76808,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -75513,6 +76905,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. 
You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -75954,7 +77350,7 @@ + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadata": { +- "description": "Encapsulates partial completion metadata for SetCommonInstanceMetadata. Will be propagated on Operation.metadata as per go/partial-completion-api-clean. See go/gce-aips/2822 for API council results.", ++ "description": "Encapsulates partial completion metadata for SetCommonInstanceMetadata. Will be propagated on Operation.metadata.", + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { +@@ -76268,6 +77664,10 @@ + "format": "int64", + "type": "string" + }, ++ "enableConfidentialCompute": { ++ "description": "Whether this snapshot is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", ++ "type": "boolean" ++ }, + "guestFlush": { + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process.", + "type": "boolean" +@@ -76328,6 +77728,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -76371,6 +77772,10 @@ + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, ++ "sourceDiskForRecoveryCheckpoint": { ++ "description": "The source disk whose recovery checkpoint will be used to create this snapshot.", ++ "type": "string" ++ }, + "sourceDiskId": { + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", + "type": "string" +@@ -77562,303 +78967,807 @@ + }, + "type": "object" + }, +- "SslPolicy": { +- "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", +- "id": "SslPolicy", +- "properties": { +- "creationTimestamp": { +- "description": "[Output Only] Creation timestamp in RFC3339 text format.", +- "type": "string" +- }, +- "customFeatures": { +- "description": "A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "description": { +- "description": "An optional description of this resource. Provide this property when you create the resource.", +- "type": "string" +- }, +- "enabledFeatures": { +- "description": "[Output Only] The list of features enabled in the SSL policy.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "fingerprint": { +- "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve an SslPolicy.", +- "format": "byte", +- "type": "string" +- }, +- "id": { +- "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", +- "format": "uint64", +- "type": "string" +- }, +- "kind": { +- "default": "compute#sslPolicy", +- "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", +- "type": "string" +- }, +- "minTlsVersion": { +- "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", +- "enum": [ +- "TLS_1_0", +- "TLS_1_1", +- "TLS_1_2" +- ], +- "enumDescriptions": [ +- "TLS 1.0", +- "TLS 1.1", +- "TLS 1.2" +- ], +- "type": "string" +- }, +- "name": { +- "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "type": "string" +- }, +- "profile": { +- "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", +- "enum": [ +- "COMPATIBLE", +- "CUSTOM", +- "MODERN", +- "RESTRICTED" +- ], +- "enumDescriptions": [ +- "Compatible profile. Allows the broadset set of clients, even those which support only out-of-date SSL features to negotiate with the load balancer.", +- "Custom profile. Allow only the set of allowed SSL features specified in the customFeatures field.", +- "Modern profile. Supports a wide set of SSL features, allowing modern clients to negotiate SSL with the load balancer.", +- "Restricted profile. Supports a reduced set of SSL features, intended to meet stricter compliance requirements." +- ], +- "type": "string" +- }, +- "region": { +- "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", +- "type": "string" +- }, +- "selfLink": { +- "description": "[Output Only] Server-defined URL for the resource.", +- "type": "string" +- }, +- "selfLinkWithId": { +- "description": "[Output Only] Server-defined URL for this resource with the resource id.", +- "type": "string" +- }, +- "tlsSettings": { +- "$ref": "ServerTlsSettings", +- "description": "Security settings for the proxy. This field is only applicable to a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED." +- }, +- "warnings": { +- "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", +- "items": { +- "properties": { +- "code": { +- "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +- "enum": [ +- "CLEANUP_FAILED", +- "DEPRECATED_RESOURCE_USED", +- "DEPRECATED_TYPE_USED", +- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +- "EXPERIMENTAL_TYPE_USED", +- "EXTERNAL_API_WARNING", +- "FIELD_VALUE_OVERRIDEN", +- "INJECTED_KERNELS_DEPRECATED", +- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +- "LARGE_DEPLOYMENT_WARNING", +- "MISSING_TYPE_DEPENDENCY", +- "NEXT_HOP_ADDRESS_NOT_ASSIGNED", +- "NEXT_HOP_CANNOT_IP_FORWARD", +- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +- "NEXT_HOP_INSTANCE_NOT_FOUND", +- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +- "NEXT_HOP_NOT_RUNNING", +- "NOT_CRITICAL_ERROR", +- "NO_RESULTS_ON_PAGE", +- "PARTIAL_SUCCESS", +- "REQUIRED_TOS_AGREEMENT", +- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +- "RESOURCE_NOT_DELETED", +- "SCHEMA_VALIDATION_IGNORED", +- "SINGLE_INSTANCE_PROPERTY_TEMPLATE", +- "UNDECLARED_PROPERTIES", +- "UNREACHABLE" +- ], +- "enumDescriptions": [ +- "Warning about failed cleanup of transient changes made by a failed operation.", +- "A link to a deprecated resource was created.", +- "When deploying and at least one of the resources has a type marked as deprecated", +- "The user created a boot disk that is larger than image size.", +- "When deploying and at least one of the resources has a type marked as experimental", +- "Warning that is present in an external api call", +- "Warning that value of a field has been overridden. Deprecated unused field.", +- "The operation involved use of an injected kernel, which is deprecated.", +- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", +- "When deploying a deployment with a exceedingly large number of resources", +- "A resource depends on a missing type", +- "The route's nextHopIp address is not assigned to an instance on the network.", +- "The route's next hop instance cannot ip forward.", +- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", +- "The route's nextHopInstance URL refers to an instance that does not exist.", +- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", +- "The route's next hop instance does not have a status of RUNNING.", +- "Error which is not critical. We decided to continue the process despite the mentioned error.", +- "No results are present on a particular list page.", +- "Success is reported, but some results may be missing due to errors", +- "The user attempted to use a resource that requires a TOS they have not accepted.", +- "Warning that a resource is in use.", +- "One or more of the resources set to auto-delete could not be deleted because they were in use.", +- "When a resource schema validation is ignored.", +- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", +- "When undeclared properties in the schema are present", +- "A given scope cannot be reached." +- ], +- "type": "string" +- }, +- "data": { +- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", +- "items": { +- "properties": { +- "key": { +- "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", +- "type": "string" +- }, +- "value": { +- "description": "[Output Only] A warning data value corresponding to the key.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "[Output Only] A human-readable description of the warning code.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "SslPolicyReference": { +- "id": "SslPolicyReference", +- "properties": { +- "sslPolicy": { +- "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicy": { +- "id": "StatefulPolicy", +- "properties": { +- "preservedState": { +- "$ref": "StatefulPolicyPreservedState" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedState": { +- "description": "Configuration of preserved resources.", +- "id": "StatefulPolicyPreservedState", +- "properties": { +- "disks": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateDiskDevice" +- }, +- "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", +- "type": "object" +- }, +- "externalIPs": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateNetworkIp" +- }, +- "description": "External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", +- "type": "object" +- }, +- "internalIPs": { +- "additionalProperties": { +- "$ref": "StatefulPolicyPreservedStateNetworkIp" +- }, +- "description": "Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedStateDiskDevice": { +- "id": "StatefulPolicyPreservedStateDiskDevice", +- "properties": { +- "autoDelete": { +- "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", +- "enum": [ +- "NEVER", +- "ON_PERMANENT_INSTANCE_DELETION" +- ], +- "enumDescriptions": [ +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "StatefulPolicyPreservedStateNetworkIp": { +- "id": "StatefulPolicyPreservedStateNetworkIp", +- "properties": { +- "autoDelete": { +- "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. 
when the given instance or the whole group is deleted.", +- "enum": [ +- "NEVER", +- "ON_PERMANENT_INSTANCE_DELETION" +- ], +- "enumDescriptions": [ +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "Status": { +- "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", +- "id": "Status", +- "properties": { +- "code": { +- "description": "The status code, which should be an enum value of google.rpc.Code.", +- "format": "int32", +- "type": "integer" +- }, +- "details": { +- "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", +- "items": { +- "additionalProperties": { +- "description": "Properties of the object. Contains field @type with type URL.", +- "type": "any" +- }, +- "type": "object" +- }, +- "type": "array" +- }, +- "message": { +- "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", +- "type": "string" +- } +- }, +- "type": "object" +- }, ++ "SslPolicy": { ++ "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", ++ "id": "SslPolicy", ++ "properties": { ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "customFeatures": { ++ "description": "A list of features enabled when the selected profile is CUSTOM. The method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "enabledFeatures": { ++ "description": "[Output Only] The list of features enabled in the SSL policy.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "fingerprint": { ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an SslPolicy.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#sslPolicy", ++ "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", ++ "type": "string" ++ }, ++ "minTlsVersion": { ++ "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. 
This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", ++ "enum": [ ++ "TLS_1_0", ++ "TLS_1_1", ++ "TLS_1_2" ++ ], ++ "enumDescriptions": [ ++ "TLS 1.0", ++ "TLS 1.1", ++ "TLS 1.2" ++ ], ++ "type": "string" ++ }, ++ "name": { ++ "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "profile": { ++ "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", ++ "enum": [ ++ "COMPATIBLE", ++ "CUSTOM", ++ "MODERN", ++ "RESTRICTED" ++ ], ++ "enumDescriptions": [ ++ "Compatible profile. Allows the broadset set of clients, even those which support only out-of-date SSL features to negotiate with the load balancer.", ++ "Custom profile. Allow only the set of allowed SSL features specified in the customFeatures field.", ++ "Modern profile. Supports a wide set of SSL features, allowing modern clients to negotiate SSL with the load balancer.", ++ "Restricted profile. Supports a reduced set of SSL features, intended to meet stricter compliance requirements." ++ ], ++ "type": "string" ++ }, ++ "region": { ++ "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource with the resource id.", ++ "type": "string" ++ }, ++ "tlsSettings": { ++ "$ref": "ServerTlsSettings", ++ "description": "Security settings for the proxy. This field is only applicable to a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED." ++ }, ++ "warnings": { ++ "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", ++ "items": { ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SslPolicyReference": { ++ "id": "SslPolicyReference", ++ "properties": { ++ "sslPolicy": { ++ "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicy": { ++ "id": "StatefulPolicy", ++ "properties": { ++ "preservedState": { ++ "$ref": "StatefulPolicyPreservedState" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedState": { ++ "description": "Configuration of preserved resources.", ++ "id": "StatefulPolicyPreservedState", ++ "properties": { ++ "disks": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateDiskDevice" ++ }, ++ "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", ++ "type": "object" ++ }, ++ "externalIPs": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateNetworkIp" ++ }, ++ "description": "External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", ++ "type": "object" ++ }, ++ "internalIPs": { ++ "additionalProperties": { ++ "$ref": "StatefulPolicyPreservedStateNetworkIp" ++ }, ++ "description": "Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedStateDiskDevice": { ++ "id": "StatefulPolicyPreservedStateDiskDevice", ++ "properties": { ++ "autoDelete": { ++ "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", ++ "enum": [ ++ "NEVER", ++ "ON_PERMANENT_INSTANCE_DELETION" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StatefulPolicyPreservedStateNetworkIp": { ++ "id": "StatefulPolicyPreservedStateNetworkIp", ++ "properties": { ++ "autoDelete": { ++ "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. 
when the given instance or the whole group is deleted.", ++ "enum": [ ++ "NEVER", ++ "ON_PERMANENT_INSTANCE_DELETION" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "Status": { ++ "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", ++ "id": "Status", ++ "properties": { ++ "code": { ++ "description": "The status code, which should be an enum value of google.rpc.Code.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "details": { ++ "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", ++ "items": { ++ "additionalProperties": { ++ "description": "Properties of the object. Contains field @type with type URL.", ++ "type": "any" ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePool": { ++ "description": "Represents a zonal storage pool resource.", ++ "id": "StoragePool", ++ "properties": { ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#storagePool", ++ "description": "[Output Only] Type of the resource. Always compute#storagePool for storage pools.", ++ "type": "string" ++ }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this storage pool, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a storage pool.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels to apply to this storage pool. These can be later modified by the setLabels method.", ++ "type": "object" ++ }, ++ "name": { ++ "annotations": { ++ "required": [ ++ "compute.storagePools.insert" ++ ] ++ }, ++ "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "provisionedIops": { ++ "description": "Provsioned IOPS of the storage pool.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "resourceStatus": { ++ "$ref": "StoragePoolResourceStatus", ++ "description": "[Output Only] Status information for the storage pool resource." ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource's resource id.", ++ "type": "string" ++ }, ++ "sizeGb": { ++ "description": "Size, in GiB, of the storage pool.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "state": { ++ "description": "[Output Only] The status of storage pool creation. - CREATING: Storage pool is provisioning. storagePool. - FAILED: Storage pool creation failed. - READY: Storage pool is ready for use. - DELETING: Storage pool is deleting. ", ++ "enum": [ ++ "CREATING", ++ "DELETING", ++ "FAILED", ++ "READY" ++ ], ++ "enumDescriptions": [ ++ "StoragePool is provisioning", ++ "StoragePool is deleting.", ++ "StoragePool creation failed.", ++ "StoragePool is ready for use." ++ ], ++ "type": "string" ++ }, ++ "type": { ++ "description": "Type of the storage pool", ++ "enum": [ ++ "SSD", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "zone": { ++ "description": "[Output Only] URL of the zone where the storage pool resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolAggregatedList": { ++ "id": "StoragePoolAggregatedList", ++ "properties": { ++ "etag": { ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "additionalProperties": { ++ "$ref": "StoragePoolsScopedList", ++ "description": "[Output Only] Name of the scope containing this set of storage pool." ++ }, ++ "description": "A list of StoragePoolsScopedList resources.", ++ "type": "object" ++ }, ++ "kind": { ++ "default": "compute#storagePoolAggregatedList", ++ "description": "[Output Only] Type of resource. Always compute#storagePoolAggregatedList for aggregated lists of storage pools.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolList": { ++ "description": "A list of StoragePool resources.", ++ "id": "StoragePoolList", ++ "properties": { ++ "etag": { ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of StoragePool resources.", ++ "items": { ++ "$ref": "StoragePool" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#storagePoolList", ++ "description": "[Output Only] Type of resource. Always compute#storagePoolList for lists of storagePools.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources. end_interface: MixerListResponseWithEtagBuilder", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolResourceStatus": { ++ "description": "[Output Only] Contains output only fields.", ++ "id": "StoragePoolResourceStatus", ++ "properties": { ++ "aggregateDiskProvisionedIops": { ++ "description": "[Output Only] Sum of all the disk' provisioned IOPS.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "aggregateDiskSizeGb": { ++ "description": "[Output Only] Sum of all the capacity provisioned in disks in this storage pool. A disk's provisioned capacity is the same as its total capacity.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "lastResizeTimestamp": { ++ "description": "[Output Only] Timestamp of the last successful resize in RFC3339 text format.", ++ "type": "string" ++ }, ++ "maxAggregateDiskSizeGb": { ++ "description": "[Output Only] Maximum allowed aggregate disk size in gigabytes.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "numberOfDisks": { ++ "description": "[Output Only] Number of disks used.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "usedBytes": { ++ "description": "[Output Only] Space used by data stored in disks within the storage pool (in bytes).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "usedReducedBytes": { ++ "description": "[Output Only] Space used by compressed and deduped data stored in disks within the storage pool (in bytes).", ++ "format": "int64", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "StoragePoolsScopedList": { ++ "id": "StoragePoolsScopedList", ++ "properties": { ++ "storagePools": { ++ "description": "[Output Only] A list of storage pool contained in this scope.", ++ "items": { ++ "$ref": "StoragePool" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning which replaces the list of storage pool when the list is empty.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, + "Subnetwork": { + "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Subnetwork", +@@ -77896,7 +79805,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "enableL2": { +@@ -78003,7 +79912,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "AGGREGATE", + "CLOUD_EXTENSION", +@@ -78037,7 +79946,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. 
Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -78370,7 +80279,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -78939,6 +80848,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -79330,7 +81244,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -79386,7 +81300,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. 
Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -79409,6 +81323,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -79455,7 +81374,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -80670,7 +82589,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -80712,7 +82631,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -82663,7 +84582,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. 
The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "AGGREGATE", + "CLOUD_EXTENSION", +@@ -82689,7 +84608,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -83171,11 +85090,13 @@ + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", + "enum": [ + "IPV4_IPV6", +- "IPV4_ONLY" ++ "IPV4_ONLY", ++ "IPV6_ONLY" + ], + "enumDescriptions": [ + "Enable VPN gateway with both IPv4 and IPv6 protocols.", +- "Enable VPN gateway with only IPv4 protocol." ++ "Enable VPN gateway with only IPv4 protocol.", ++ "Enable VPN gateway with only IPv6 protocol." + ], + "type": "string" + }, +@@ -83488,7 +85409,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -83500,7 +85421,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. 
The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +index 0b34f12ea5d..a0afc85bb79 100644 +--- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:alpha" + const apiName = "compute" +@@ -228,6 +229,7 @@ func New(client *http.Client) (*Service, error) { + s.Snapshots = NewSnapshotsService(s) + s.SslCertificates = NewSslCertificatesService(s) + s.SslPolicies = NewSslPoliciesService(s) ++ s.StoragePools = NewStoragePoolsService(s) + s.Subnetworks = NewSubnetworksService(s) + s.TargetGrpcProxies = NewTargetGrpcProxiesService(s) + s.TargetHttpProxies = NewTargetHttpProxiesService(s) +@@ -419,6 +421,8 @@ type Service struct { + + SslPolicies *SslPoliciesService + ++ StoragePools *StoragePoolsService ++ + Subnetworks *SubnetworksService + + TargetGrpcProxies *TargetGrpcProxiesService +@@ -1213,6 +1217,15 @@ type SslPoliciesService struct { + s *Service + } + ++func NewStoragePoolsService(s *Service) *StoragePoolsService { ++ rs := &StoragePoolsService{s: s} ++ return rs ++} ++ ++type StoragePoolsService struct { ++ s *Service ++} ++ + func NewSubnetworksService(s *Service) *SubnetworksService { + rs := &SubnetworksService{s: s} + return rs +@@ -2066,32 +2079,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. ++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. 
++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -2141,12 +2157,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -3060,6 +3077,17 @@ type AllocationAggregateReservation struct { + // "VM_FAMILY_MEMORY_OPTIMIZED_M3" + VmFamily string `json:"vmFamily,omitempty"` + ++ // WorkloadType: The workload type of the instances that will target ++ // this reservation. ++ // ++ // Possible values: ++ // "BATCH" - Reserved resources will be optimized for BATCH workloads, ++ // such as ML training. ++ // "SERVING" - Reserved resources will be optimized for SERVING ++ // workloads, such as ML inference. ++ // "UNSPECIFIED" ++ WorkloadType string `json:"workloadType,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "InUseResources") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -3594,6 +3622,10 @@ type AttachedDiskInitializeParams struct { + // example: pd-standard. + DiskType string `json:"diskType,omitempty"` + ++ // EnableConfidentialCompute: Whether this disk is using confidential ++ // compute mode. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. Guest +@@ -3717,6 +3749,15 @@ type AttachedDiskInitializeParams struct { + // the source snapshot. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + ++ // StoragePool: The storage pool in which the new disk is created. You ++ // can provide this as a partial or full URL to the resource. 
For ++ // example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /storagePools/storagePool - ++ // projects/project/zones/zone/storagePools/storagePool - ++ // zones/zone/storagePools/storagePool ++ StoragePool string `json:"storagePool,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Architecture") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -4765,15 +4806,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). ++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4801,7 +4844,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -6184,6 +6232,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and +@@ -8076,9 +8128,6 @@ type BulkInsertInstanceResource struct { + // Count: The maximum number of instances to create. + Count int64 `json:"count,omitempty,string"` + +- // Instance: DEPRECATED: Please use instance_properties instead. +- Instance *Instance `json:"instance,omitempty"` +- + // InstanceProperties: The instance properties defining the VM instances + // to be created. Required if sourceInstanceTemplate is not provided. + InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"` +@@ -8151,11 +8200,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { + // properties to be set on individual instances. To be extended in the + // future. + type BulkInsertInstanceResourcePerInstanceProperties struct { ++ // Hostname: Specifies the hostname of the instance. More details in: ++ // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention ++ Hostname string `json:"hostname,omitempty"` ++ + // Name: This field is only temporary. It will be removed. Do not use + // it. + Name string `json:"name,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Name") to ++ // ForceSendFields is a list of field names (e.g. "Hostname") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -8163,8 +8216,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Name") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Hostname") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -8178,6 +8231,57 @@ func (s *BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type BulkInsertOperationStatus struct { ++ // CreatedVmCount: [Output Only] Count of VMs successfully created so ++ // far. ++ CreatedVmCount int64 `json:"createdVmCount,omitempty"` ++ ++ // DeletedVmCount: [Output Only] Count of VMs that got deleted during ++ // rollback. ++ DeletedVmCount int64 `json:"deletedVmCount,omitempty"` ++ ++ // FailedToCreateVmCount: [Output Only] Count of VMs that started ++ // creating but encountered an error. ++ FailedToCreateVmCount int64 `json:"failedToCreateVmCount,omitempty"` ++ ++ // Status: [Output Only] Creation status of BulkInsert operation - ++ // information if the flow is rolling forward or rolling back. ++ // ++ // Possible values: ++ // "CREATING" - Rolling forward - creating VMs. ++ // "DONE" - Done ++ // "ROLLING_BACK" - Rolling back - cleaning up after an error. ++ // "STATUS_UNSPECIFIED" ++ Status string `json:"status,omitempty"` ++ ++ // TargetVmCount: [Output Only] Count of VMs originally planned to be ++ // created. ++ TargetVmCount int64 `json:"targetVmCount,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "CreatedVmCount") to ++ // unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CreatedVmCount") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BulkInsertOperationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod BulkInsertOperationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type BundledLocalSsds struct { + // DefaultInterface: The default disk interface if the interface is not + // specified. +@@ -8588,6 +8692,10 @@ type Commitment struct { + // Reservations: List of reservations in this commitment. + Reservations []*Reservation `json:"reservations,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for Commitment ++ // resource. ++ ResourceStatus *CommitmentResourceStatus `json:"resourceStatus,omitempty"` ++ + // Resources: A list of commitment amounts for particular resources. + // Note that VCPU and MEMORY resource commitments must occur together. + Resources []*ResourceCommitment `json:"resources,omitempty"` +@@ -8599,7 +8707,7 @@ type Commitment struct { + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -8613,6 +8721,8 @@ type Commitment struct { + // + // Possible values: + // "ACTIVE" ++ // "CANCELED_EARLY_TERMINATION" ++ // "CANCELING" + // "CANCELLED" - Deprecate CANCELED status. Will use separate status + // to differentiate cancel by mergeCud or manual cancellation. + // "CREATING" +@@ -8635,6 +8745,7 @@ type Commitment struct { + // "COMPUTE_OPTIMIZED" + // "COMPUTE_OPTIMIZED_C2D" + // "COMPUTE_OPTIMIZED_C3" ++ // "COMPUTE_OPTIMIZED_C3D" + // "GENERAL_PURPOSE" + // "GENERAL_PURPOSE_E2" + // "GENERAL_PURPOSE_N2" +@@ -9054,6 +9165,82 @@ func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// CommitmentResourceStatus: [Output Only] Contains output only fields. ++type CommitmentResourceStatus struct { ++ // CancellationInformation: [Output Only] An optional, contains all the ++ // needed information of cancellation. ++ CancellationInformation *CommitmentResourceStatusCancellationInformation `json:"cancellationInformation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "CancellationInformation") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. 
This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CancellationInformation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CommitmentResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod CommitmentResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type CommitmentResourceStatusCancellationInformation struct { ++ // CanceledCommitment: [Output Only] An optional amount of CUDs canceled ++ // so far in the last 365 days. ++ CanceledCommitment *Money `json:"canceledCommitment,omitempty"` ++ ++ // CanceledCommitmentLastUpdatedTimestamp: [Output Only] An optional ++ // last update time of canceled_commitment. RFC3339 text format. ++ CanceledCommitmentLastUpdatedTimestamp string `json:"canceledCommitmentLastUpdatedTimestamp,omitempty"` ++ ++ // CancellationCap: [Output Only] An optional,the cancellation cap for ++ // how much commitments can be canceled in a rolling 365 per billing ++ // account. ++ CancellationCap *Money `json:"cancellationCap,omitempty"` ++ ++ // CancellationFee: [Output Only] An optional, cancellation fee. ++ CancellationFee *Money `json:"cancellationFee,omitempty"` ++ ++ // CancellationFeeExpirationTimestamp: [Output Only] An optional, ++ // cancellation fee expiration time. RFC3339 text format. ++ CancellationFeeExpirationTimestamp string `json:"cancellationFeeExpirationTimestamp,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "CanceledCommitment") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CanceledCommitment") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CommitmentResourceStatusCancellationInformation) MarshalJSON() ([]byte, error) { ++ type NoMethod CommitmentResourceStatusCancellationInformation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type CommitmentsScopedList struct { + // Commitments: [Output Only] A list of commitments contained in this + // scope. +@@ -9302,6 +9489,7 @@ type ConfidentialInstanceConfig struct { + // "SEV" - AMD Secure Encrypted Virtualization. + // "SEV_SNP" - AMD Secure Encrypted Virtualization - Secure Nested + // Paging. ++ // "TDX" - Intel Trust Domain eXtension. 
+ ConfidentialInstanceType string `json:"confidentialInstanceType,omitempty"` + + // EnableConfidentialCompute: Defines whether the instance should have +@@ -9851,6 +10039,10 @@ type Disk struct { + // provide a key to use the disk later. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + ++ // EnableConfidentialCompute: Whether this disk is using confidential ++ // compute mode. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // EraseWindowsVssSignature: Specifies whether the disk restored from a + // source snapshot should erase Windows specific VSS signature. + EraseWindowsVssSignature bool `json:"eraseWindowsVssSignature,omitempty"` +@@ -10120,6 +10312,15 @@ type Disk struct { + // "RESTORING" - Source data is being copied into the disk. + Status string `json:"status,omitempty"` + ++ // StoragePool: The storage pool in which the new disk is created. You ++ // can provide this as a partial or full URL to the resource. For ++ // example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /storagePools/storagePool - ++ // projects/project/zones/zone/storagePools/storagePool - ++ // zones/zone/storagePools/storagePool ++ StoragePool string `json:"storagePool,omitempty"` ++ + // StorageType: [Deprecated] Storage type of the persistent disk. + // + // Possible values: +@@ -10369,6 +10570,16 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + } + + type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ + // Disk: The other disk asynchronously replicated to or from the current + // disk. You can provide this as a partial or full URL to the resource. + // For example, the following are valid values: - +@@ -10385,20 +10596,22 @@ type DiskAsyncReplication struct { + // identify the exact version of the disk that was used. + DiskId string `json:"diskId,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Disk") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Disk") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. 
However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. + NullFields []string `json:"-"` + } + +@@ -10783,6 +10996,11 @@ type DiskResourceStatus struct { + // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message + AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` + ++ // UsedBytes: [Output Only] Space used by data stored in the disk (in ++ // bytes). Note that this field is set only when the disk is in a ++ // storage pool. ++ UsedBytes int64 `json:"usedBytes,omitempty,string"` ++ + // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -13399,6 +13617,14 @@ type FirewallPolicyAssociation struct { + // Name: The name for an association. + Name string `json:"name,omitempty"` + ++ // Priority: An integer indicating the priority of an association. The ++ // priority must be a positive value between 1 and 2147483647. Firewall ++ // Policies are evaluated from highest to lowest priority where 1 is the ++ // highest priority and 2147483647 is the lowest priority. The default ++ // value is `1000`. If two associations have the same priority then ++ // lexicographical order on association names is applied. ++ Priority int64 `json:"priority,omitempty"` ++ + // ShortName: [Output Only] The short name of the firewall policy of the + // association. + ShortName string `json:"shortName,omitempty"` +@@ -14125,9 +14351,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -15051,6 +15278,19 @@ func (s *FutureReservationSpecificSKUProperties) MarshalJSON() ([]byte, error) { + // FutureReservationStatus: [Output only] Represents status related to + // the future reservation. + type FutureReservationStatus struct { ++ // AmendmentStatus: The current status of the requested amendment. ++ // ++ // Possible values: ++ // "AMENDMENT_APPROVED" - The requested amendment to the Future ++ // Resevation has been approved and applied by GCP. 
++ // "AMENDMENT_DECLINED" - The requested amendment to the Future ++ // Reservation has been declined by GCP and the original state was ++ // restored. ++ // "AMENDMENT_IN_REVIEW" - The requested amendment to the Future ++ // Reservation is currently being reviewd by GCP. ++ // "AMENDMENT_STATUS_UNSPECIFIED" ++ AmendmentStatus string `json:"amendmentStatus,omitempty"` ++ + // AutoCreatedReservations: Fully qualified urls of the automatically + // created reservations at start_time. + AutoCreatedReservations []string `json:"autoCreatedReservations,omitempty"` +@@ -15060,6 +15300,13 @@ type FutureReservationStatus struct { + // capacity delivered as part of existing matching reservations. + FulfilledCount int64 `json:"fulfilledCount,omitempty,string"` + ++ // LastKnownGoodState: This field represents the future reservation ++ // before an amendment was requested. If the amendment is declined, the ++ // Future Reservation will be reverted to the last known good state. The ++ // last known good state is not set when updating a future reservation ++ // whose Procurement Status is DRAFTING. ++ LastKnownGoodState *FutureReservationStatusLastKnownGoodState `json:"lastKnownGoodState,omitempty"` ++ + // LockTime: Time when Future Reservation would become LOCKED, after + // which no modifications to Future Reservation will be allowed. + // Applicable only after the Future Reservation is in the APPROVED +@@ -15082,6 +15329,9 @@ type FutureReservationStatus struct { + // fulfilled. Additional reservations were provided but did not reach + // total_count reserved instance slots. + // "FULFILLED" - Future reservation is fulfilled completely. ++ // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future ++ // Reservation has been requested. If the Amendment is declined, the ++ // Future Reservation will be restored to the last known good state. + // "PENDING_APPROVAL" - Future reservation is pending approval by GCP. + // "PROCUREMENT_STATUS_UNSPECIFIED" + // "PROCURING" - Future reservation is being procured by GCP. Beyond +@@ -15095,19 +15345,18 @@ type FutureReservationStatus struct { + + SpecificSkuProperties *FutureReservationStatusSpecificSKUProperties `json:"specificSkuProperties,omitempty"` + +- // ForceSendFields is a list of field names (e.g. +- // "AutoCreatedReservations") to unconditionally include in API +- // requests. By default, fields with empty or default values are omitted +- // from API requests. However, any non-pointer, non-interface field +- // appearing in ForceSendFields will be sent to the server regardless of +- // whether the field is empty or not. This may be used to include empty +- // fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. "AmendmentStatus") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "AutoCreatedReservations") +- // to include in API requests with the JSON null value. By default, +- // fields with empty values are omitted from API requests. However, any +- // field with an empty value appearing in NullFields will be sent to the ++ // NullFields is a list of field names (e.g. 
"AmendmentStatus") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. +@@ -15120,6 +15369,108 @@ func (s *FutureReservationStatus) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// FutureReservationStatusLastKnownGoodState: The state that the future ++// reservation will be reverted to should the amendment be declined. ++type FutureReservationStatusLastKnownGoodState struct { ++ // Description: The description of the FutureReservation before an ++ // amendment was requested. ++ Description string `json:"description,omitempty"` ++ ++ FutureReservationSpecs *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs `json:"futureReservationSpecs,omitempty"` ++ ++ // NamePrefix: The name prefix of the Future Reservation before an ++ // amendment was requested. ++ NamePrefix string `json:"namePrefix,omitempty"` ++ ++ // ProcurementStatus: The status of the last known good state for the ++ // Future Reservation. ++ // ++ // Possible values: ++ // "APPROVED" - Future reservation is approved by GCP. ++ // "CANCELLED" - Future reservation is cancelled by the customer. ++ // "COMMITTED" - Future reservation is committed by the customer. ++ // "DECLINED" - Future reservation is rejected by GCP. ++ // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions ++ // to PENDING_APPROVAL upon user submitting FR. ++ // "FAILED" - Future reservation failed. No additional reservations ++ // were provided. ++ // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially ++ // fulfilled. Additional reservations were provided but did not reach ++ // total_count reserved instance slots. ++ // "FULFILLED" - Future reservation is fulfilled completely. ++ // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future ++ // Reservation has been requested. If the Amendment is declined, the ++ // Future Reservation will be restored to the last known good state. ++ // "PENDING_APPROVAL" - Future reservation is pending approval by GCP. ++ // "PROCUREMENT_STATUS_UNSPECIFIED" ++ // "PROCURING" - Future reservation is being procured by GCP. Beyond ++ // this point, Future reservation is locked and no further modifications ++ // are allowed. ++ // "PROVISIONING" - Future reservation capacity is being provisioned. ++ // This state will be entered after start_time, while reservations are ++ // being created to provide total_count reserved instance slots. This ++ // state will not persist past start_time + 24h. ++ ProcurementStatus string `json:"procurementStatus,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. 
By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *FutureReservationStatusLastKnownGoodState) MarshalJSON() ([]byte, error) { ++ type NoMethod FutureReservationStatusLastKnownGoodState ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// FutureReservationStatusLastKnownGoodStateFutureReservationSpecs: The ++// properties of the last known good state for the Future Reservation. ++type FutureReservationStatusLastKnownGoodStateFutureReservationSpecs struct { ++ // ShareSettings: The previous share settings of the Future Reservation. ++ ShareSettings *ShareSettings `json:"shareSettings,omitempty"` ++ ++ // SpecificSkuProperties: The previous instance related properties of ++ // the Future Reservation. ++ SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"` ++ ++ // TimeWindow: The previous time window of the Future Reservation. ++ TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ShareSettings") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ShareSettings") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs) MarshalJSON() ([]byte, error) { ++ type NoMethod FutureReservationStatusLastKnownGoodStateFutureReservationSpecs ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // FutureReservationStatusSpecificSKUProperties: Properties to be set + // for the Future Reservation. + type FutureReservationStatusSpecificSKUProperties struct { +@@ -16211,8 +16562,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. 
+ // + // Possible values: + // "BARE_METAL_LINUX_COMPATIBLE" +@@ -16223,6 +16574,7 @@ type GuestOsFeature struct { + // "SEV_CAPABLE" + // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" ++ // "TDX_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" + // "WINDOWS" +@@ -16587,12 +16939,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/alpha/regionHealthChecks) Internal + // HTTP(S) load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -18074,7 +18426,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -20595,9 +20947,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -21468,6 +21820,11 @@ type InstanceGroupManager struct { + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + ++ // InstanceFlexibilityPolicy: Instance flexibility allowing MIG to ++ // create VMs from multiple types of machines. Instance flexibility ++ // configuration on MIG overrides instance template configuration. ++ InstanceFlexibilityPolicy *InstanceGroupManagerInstanceFlexibilityPolicy `json:"instanceFlexibilityPolicy,omitempty"` ++ + // InstanceGroup: [Output Only] The URL of the Instance Group resource. + InstanceGroup string `json:"instanceGroup,omitempty"` + +@@ -21548,6 +21905,13 @@ type InstanceGroupManager struct { + // Resizing the group also changes this number. + TargetSize int64 `json:"targetSize,omitempty"` + ++ // TargetSizeUnit: The unit of measure for the target size. ++ // ++ // Possible values: ++ // "VCPU" - TargetSize is the target count of vCPUs of VMs. ++ // "VM" - [Default] TargetSize is the target number of VMs. ++ TargetSizeUnit string `json:"targetSizeUnit,omitempty"` ++ + // TargetStoppedSize: The target number of stopped instances for this + // managed instance group. 
This number changes when you: - Stop instance + // using the stopInstances method or start instances using the +@@ -21949,13 +22313,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. + HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // MaxUnavailable: Maximum number of instances that can be unavailable +@@ -22038,6 +22403,70 @@ func (s *InstanceGroupManagerAutoHealingPolicyAutoHealingTriggers) MarshalJSON() + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstanceGroupManagerInstanceFlexibilityPolicy struct { ++ // InstanceSelectionLists: List of instance selection options that the ++ // group will use when creating new VMs. ++ InstanceSelectionLists map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelectionLists,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InstanceSelectionLists") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InstanceSelectionLists") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstanceGroupManagerInstanceFlexibilityPolicy) MarshalJSON() ([]byte, error) { ++ type NoMethod InstanceGroupManagerInstanceFlexibilityPolicy ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection struct { ++ // MachineTypes: Full machine-type names, e.g. "n1-standard-16". ++ MachineTypes []string `json:"machineTypes,omitempty"` ++ ++ // Rank: Preference of this instance selection. 
Lower number means ++ // higher preference. MIG will first try to create a VM based on the ++ // machine-type with lowest rank and fallback to next rank based on ++ // availability. Machine types and instance selections with the same ++ // rank have the same preference. ++ Rank int64 `json:"rank,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "MachineTypes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "MachineTypes") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection) MarshalJSON() ([]byte, error) { ++ type NoMethod InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstanceGroupManagerInstanceLifecyclePolicy struct { + // DefaultActionOnFailure: Defines behaviour for all instance or + // failures +@@ -23297,7 +23726,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -26178,9 +26609,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -26246,6 +26677,35 @@ func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesBulkInsertOperationMetadata struct { ++ // PerLocationStatus: Status information per location (location name is ++ // key). Example key: zones/us-central1-a ++ PerLocationStatus map[string]BulkInsertOperationStatus `json:"perLocationStatus,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "PerLocationStatus") ++ // to unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PerLocationStatus") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesBulkInsertOperationMetadata) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesBulkInsertOperationMetadata ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesGetEffectiveFirewallsResponse struct { + // FirewallPolicys: Effective firewalls from firewall policies. + FirewallPolicys []*InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` +@@ -26751,6 +27211,42 @@ func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesSetSecurityPolicyRequest struct { ++ // NetworkInterfaces: The network interfaces that the security policy ++ // will be applied to. Network interfaces use the nicN naming format. ++ // You can only set a security policy for network interfaces with an ++ // access config. ++ NetworkInterfaces []string `json:"networkInterfaces,omitempty"` ++ ++ // SecurityPolicy: A full or partial URL to a security policy to add to ++ // this instance. If this field is set to an empty string it will remove ++ // the associated security policy. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NetworkInterfaces") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NetworkInterfaces") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesSetSecurityPolicyRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesSetServiceAccountRequest struct { + // Email: Email address of the service account. 
+ Email string `json:"email,omitempty"` +@@ -27654,9 +28150,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. +@@ -27665,6 +28161,16 @@ type Interconnect struct { + // set to true. + AdminEnabled bool `json:"adminEnabled,omitempty"` + ++ // AvailableFeatures: [Output only] List of features available for this ++ // interconnect, which can take one of the following values: - MACSEC If ++ // present then the interconnect was created on MACsec capable hardware ++ // ports. If not present then the interconnect is provisioned on ++ // non-MACsec capable ports and MACsec enablement will fail. ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ AvailableFeatures []string `json:"availableFeatures,omitempty"` ++ + // CircuitInfos: [Output Only] A list of CircuitInfo objects, that + // describe the individual circuits in this LAG. + CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` +@@ -27810,6 +28316,19 @@ type Interconnect struct { + // the interconnect is connected to. + RemoteLocation string `json:"remoteLocation,omitempty"` + ++ // RequestedFeatures: Optional. List of features requested for this ++ // interconnect, which can take one of the following values: - MACSEC If ++ // specified then the interconnect will be created on MACsec capable ++ // hardware ports. If not specified, the default value is false, which ++ // will allocate non-MACsec capable ports first if available. This ++ // parameter can only be provided during interconnect INSERT and cannot ++ // be changed using interconnect PATCH. Please review Interconnect ++ // Pricing for implications on enabling this flag. ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ RequestedFeatures []string `json:"requestedFeatures,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -28026,8 +28545,7 @@ type InterconnectAttachment struct { + // attachment. If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always +@@ -29490,6 +30008,24 @@ type InterconnectLocation struct { + // zone. Example: "zone1" or "zone2". 
+ AvailabilityZone string `json:"availabilityZone,omitempty"` + ++ // AvailableFeatures: [Output only] List of features available at this ++ // interconnect location, which can take one of the following values: - ++ // MACSEC ++ // ++ // Possible values: ++ // "IF_MACSEC" - Media Access Control security (MACsec) ++ AvailableFeatures []string `json:"availableFeatures,omitempty"` ++ ++ // AvailableLinkTypes: [Output only] List of link types available at ++ // this interconnect location, which can take one of the following ++ // values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR ++ // ++ // Possible values: ++ // "LINK_TYPE_ETHERNET_100G_LR" - 100G Ethernet, LR Optics. ++ // "LINK_TYPE_ETHERNET_10G_LR" - 10G Ethernet, LR Optics. [(rate_bps) ++ // = 10000000000]; ++ AvailableLinkTypes []string `json:"availableLinkTypes,omitempty"` ++ + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". +@@ -30097,7 +30633,7 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// InterconnectRemoteLocation: Represents an Cross-Cloud Interconnect ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect + // Remote Location resource. You can use this resource to find remote + // location details about an Interconnect attachment (VLAN). + type InterconnectRemoteLocation struct { +@@ -30161,8 +30697,8 @@ type InterconnectRemoteLocation struct { + // Possible values: + // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled + // by default on the Cross-Cloud Interconnect. +- // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and +- // will not be enabled on this port. GetDiagnostics will show ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows + // bundleAggregationType as "static". GCP does not support LAGs without + // LACP, so requestedLinkCount must be 1. + Lacp string `json:"lacp,omitempty"` +@@ -30250,7 +30786,7 @@ type InterconnectRemoteLocationConstraints struct { + // incompatible with their cloud provider. Specifically, when ordering a + // redundant pair of Cross-Cloud Interconnect ports, and one of them + // uses a remote location with portPairMatchingRemoteLocation set to +- // matching, the UI will require that both ports use the same remote ++ // matching, the UI requires that both ports use the same remote + // location. + // + // Possible values: +@@ -30357,6 +30893,374 @@ type InterconnectRemoteLocationList struct { + // remote locations. + Kind string `json:"kind,omitempty"` + ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. 
++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. 
++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. 
"Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectsGetDiagnosticsResponse: Response for the ++// InterconnectsGetDiagnosticsRequest. ++type InterconnectsGetDiagnosticsResponse struct { ++ Result *InterconnectDiagnostics `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Result") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. 
"Result") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectsGetDiagnosticsResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectsGetMacsecConfigResponse: Response for the ++// InterconnectsGetMacsecConfigRequest. ++type InterconnectsGetMacsecConfigResponse struct { ++ // Etag: end_interface: MixerGetResponseWithEtagBuilder ++ Etag string `json:"etag,omitempty"` ++ ++ Result *InterconnectMacsecConfig `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Etag") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Etag") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectsGetMacsecConfigResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InternalIpAddress struct { ++ // Cidr: IP CIDR address or range. ++ Cidr string `json:"cidr,omitempty"` ++ ++ // Owner: The owner of the internal IP address. ++ Owner string `json:"owner,omitempty"` ++ ++ // Purpose: The purpose of the internal IP address if applicable. ++ Purpose string `json:"purpose,omitempty"` ++ ++ // Region: The region of the internal IP address if applicable. ++ Region string `json:"region,omitempty"` ++ ++ // Type: The type of the internal IP address. ++ // ++ // Possible values: ++ // "PEER_RESERVED" - Reserved IP ranges on peer networks. ++ // "PEER_USED" - Used IP ranges on peer networks, including peer ++ // subnetwork IP ranges. ++ // "REMOTE_RESERVED" - Reserved IP ranges on peer networks of peer ++ // networks. ++ // "REMOTE_USED" - Used IP ranges on peer networks of peer networks. ++ // "RESERVED" - Reserved IP ranges on local network. ++ // "SUBNETWORK" - Subnetwork IP ranges on local network. ++ // "TYPE_UNSPECIFIED" ++ Type string `json:"type,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Cidr") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Cidr") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InternalIpAddress) MarshalJSON() ([]byte, error) { ++ type NoMethod InternalIpAddress ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InternalIpOwner struct { ++ // IpCidrRange: IP CIDR range being owned. ++ IpCidrRange string `json:"ipCidrRange,omitempty"` ++ ++ // Owners: URLs of the IP owners of the IP CIDR range. ++ Owners []string `json:"owners,omitempty"` ++ ++ // SystemOwned: Whether this IP CIDR range is reserved for system use. ++ SystemOwned bool `json:"systemOwned,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "IpCidrRange") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "IpCidrRange") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InternalIpOwner) MarshalJSON() ([]byte, error) { ++ type NoMethod InternalIpOwner ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type IpAddressesList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InternalIpAddress resources. ++ Items []*InternalIpAddress `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always compute#ipAddressesList ++ // for IP addresses lists. ++ Kind string `json:"kind,omitempty"` ++ + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query +@@ -30369,376 +31273,7 @@ type InterconnectRemoteLocationList struct { + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. 
"Id") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Id") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationList +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectRemoteLocationListWarning: [Output Only] Informational +-// warning message. +-type InterconnectRemoteLocationListWarning struct { +- // Code: [Output Only] A warning code, if applicable. For example, +- // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in +- // the response. +- // +- // Possible values: +- // "CLEANUP_FAILED" - Warning about failed cleanup of transient +- // changes made by a failed operation. +- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was +- // created. +- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as deprecated +- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk +- // that is larger than image size. +- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as experimental +- // "EXTERNAL_API_WARNING" - Warning that is present in an external api +- // call +- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been +- // overridden. Deprecated unused field. +- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an +- // injected kernel, which is deprecated. +- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV +- // backend service is associated with a health check that is not of type +- // HTTP/HTTPS/HTTP2. +- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a +- // exceedingly large number of resources +- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type +- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is +- // not assigned to an instance on the network. +- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot +- // ip forward. +- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's +- // nextHopInstance URL refers to an instance that does not have an ipv6 +- // interface on the same network as the route. +- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL +- // refers to an instance that does not exist. +- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance +- // URL refers to an instance that is not on the same network as the +- // route. +- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not +- // have a status of RUNNING. +- // "NOT_CRITICAL_ERROR" - Error which is not critical. 
We decided to +- // continue the process despite the mentioned error. +- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list +- // page. +- // "PARTIAL_SUCCESS" - Success is reported, but some results may be +- // missing due to errors +- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource +- // that requires a TOS they have not accepted. +- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a +- // resource is in use. +- // "RESOURCE_NOT_DELETED" - One or more of the resources set to +- // auto-delete could not be deleted because they were in use. +- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is +- // ignored. +- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in +- // instance group manager is valid as such, but its application does not +- // make a lot of sense, because it allows only single instance in +- // instance group. +- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema +- // are present +- // "UNREACHABLE" - A given scope cannot be reached. +- Code string `json:"code,omitempty"` +- +- // Data: [Output Only] Metadata about this warning in key: value format. +- // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" +- // } +- Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` +- +- // Message: [Output Only] A human-readable description of the warning +- // code. +- Message string `json:"message,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Code") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Code") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationListWarning +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InterconnectRemoteLocationListWarningData struct { +- // Key: [Output Only] A key that provides more detail on the warning +- // being returned. For example, for warnings where there are no results +- // in a list request for a particular zone, this key might be scope and +- // the key value might be the zone name. Other examples might be a key +- // indicating a deprecated resource and a suggested replacement, or a +- // warning about invalid network settings (for example, if an instance +- // attempts to perform IP forwarding but is not enabled for IP +- // forwarding). +- Key string `json:"key,omitempty"` +- +- // Value: [Output Only] A warning data value corresponding to the key. +- Value string `json:"value,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Key") to +- // unconditionally include in API requests. 
By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Key") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationListWarningData +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InterconnectRemoteLocationPermittedConnections struct { +- // InterconnectLocation: [Output Only] URL of an Interconnect location +- // that is permitted to connect to this Interconnect remote location. +- InterconnectLocation string `json:"interconnectLocation,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. +- // "InterconnectLocation") to unconditionally include in API requests. +- // By default, fields with empty or default values are omitted from API +- // requests. However, any non-pointer, non-interface field appearing in +- // ForceSendFields will be sent to the server regardless of whether the +- // field is empty or not. This may be used to include empty fields in +- // Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "InterconnectLocation") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectRemoteLocationPermittedConnections +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectsGetDiagnosticsResponse: Response for the +-// InterconnectsGetDiagnosticsRequest. +-type InterconnectsGetDiagnosticsResponse struct { +- Result *InterconnectDiagnostics `json:"result,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Result") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Result") to include in API +- // requests with the JSON null value. 
By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectsGetDiagnosticsResponse +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// InterconnectsGetMacsecConfigResponse: Response for the +-// InterconnectsGetMacsecConfigRequest. +-type InterconnectsGetMacsecConfigResponse struct { +- // Etag: end_interface: MixerGetResponseWithEtagBuilder +- Etag string `json:"etag,omitempty"` +- +- Result *InterconnectMacsecConfig `json:"result,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Etag") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Etag") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { +- type NoMethod InterconnectsGetMacsecConfigResponse +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InternalIpAddress struct { +- // Cidr: IP CIDR address or range. +- Cidr string `json:"cidr,omitempty"` +- +- // Owner: The owner of the internal IP address. +- Owner string `json:"owner,omitempty"` +- +- // Purpose: The purpose of the internal IP address if applicable. +- Purpose string `json:"purpose,omitempty"` +- +- // Region: The region of the internal IP address if applicable. +- Region string `json:"region,omitempty"` +- +- // Type: The type of the internal IP address. +- // +- // Possible values: +- // "PEER_RESERVED" - Reserved IP ranges on peer networks. +- // "PEER_USED" - Used IP ranges on peer networks, including peer +- // subnetwork IP ranges. +- // "REMOTE_RESERVED" - Reserved IP ranges on peer networks of peer +- // networks. +- // "REMOTE_USED" - Used IP ranges on peer networks of peer networks. +- // "RESERVED" - Reserved IP ranges on local network. +- // "SUBNETWORK" - Subnetwork IP ranges on local network. +- // "TYPE_UNSPECIFIED" +- Type string `json:"type,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Cidr") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. 
However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Cidr") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InternalIpAddress) MarshalJSON() ([]byte, error) { +- type NoMethod InternalIpAddress +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type InternalIpOwner struct { +- // IpCidrRange: IP CIDR range being owned. +- IpCidrRange string `json:"ipCidrRange,omitempty"` +- +- // Owners: URLs of the IP owners of the IP CIDR range. +- Owners []string `json:"owners,omitempty"` +- +- // SystemOwned: Whether this IP CIDR range is reserved for system use. +- SystemOwned bool `json:"systemOwned,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "IpCidrRange") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "IpCidrRange") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *InternalIpOwner) MarshalJSON() ([]byte, error) { +- type NoMethod InternalIpOwner +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type IpAddressesList struct { +- // Id: [Output Only] Unique identifier for the resource; defined by the +- // server. +- Id string `json:"id,omitempty"` +- +- // Items: A list of InternalIpAddress resources. +- Items []*InternalIpAddress `json:"items,omitempty"` +- +- // Kind: [Output Only] Type of resource. Always compute#ipAddressesList +- // for IP addresses lists. +- Kind string `json:"kind,omitempty"` +- +- // NextPageToken: [Output Only] This token allows you to get the next +- // page of results for list requests. If the number of results is larger +- // than maxResults, use the nextPageToken as a value for the query +- // parameter pageToken in the next list request. Subsequent list +- // requests will have their own nextPageToken to continue paging through +- // the results. +- NextPageToken string `json:"nextPageToken,omitempty"` +- +- // SelfLink: [Output Only] Server-defined URL for this resource. +- SelfLink string `json:"selfLink,omitempty"` +- +- // Warning: [Output Only] Informational warning message. 
+- Warning *IpAddressesListWarning `json:"warning,omitempty"` ++ Warning *IpAddressesListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. +@@ -32422,7 +32957,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -33084,9 +33619,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -33630,6 +34165,46 @@ func (s *MetadataFilterLabelMatch) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// Money: Represents an amount of money with its currency type. ++type Money struct { ++ // CurrencyCode: The three-letter currency code defined in ISO 4217. ++ CurrencyCode string `json:"currencyCode,omitempty"` ++ ++ // Nanos: Number of nano (10^-9) units of the amount. The value must be ++ // between -999,999,999 and +999,999,999 inclusive. If `units` is ++ // positive, `nanos` must be positive or zero. If `units` is zero, ++ // `nanos` can be positive, zero, or negative. If `units` is negative, ++ // `nanos` must be negative or zero. For example $-1.75 is represented ++ // as `units`=-1 and `nanos`=-750,000,000. ++ Nanos int64 `json:"nanos,omitempty"` ++ ++ // Units: The whole units of the amount. For example if `currencyCode` ++ // is "USD", then 1 unit is one US dollar. ++ Units int64 `json:"units,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. "CurrencyCode") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "CurrencyCode") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *Money) MarshalJSON() ([]byte, error) { ++ type NoMethod Money ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // MutualTls: [Deprecated] Configuration for the mutual Tls mode for + // peer authentication. Configuration for the mutual Tls mode for peer + // authentication. 
+@@ -33703,6 +34278,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage ++// status, mode). ++type NatIpInfo struct { ++ // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. ++ NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` ++ ++ // NatName: Name of the NAT config which the NAT IP belongs to. ++ NatName string `json:"natName,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NatIpInfoMappings") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfo) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfo ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. ++type NatIpInfoNatIpInfoMapping struct { ++ // Mode: Specifies whether NAT IP is auto or manual. ++ // ++ // Possible values: ++ // "AUTO" ++ // "MANUAL" ++ Mode string `json:"mode,omitempty"` ++ ++ // NatIp: NAT IP address. For example: 203.0.113.11. ++ NatIp string `json:"natIp,omitempty"` ++ ++ // Usage: Specifies whether NAT IP is currently serving at least one ++ // endpoint or not. ++ // ++ // Possible values: ++ // "IN_USE" ++ // "UNUSED" ++ Usage string `json:"usage,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Mode") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Mode") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfoNatIpInfoMapping ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type NatIpInfoResponse struct { ++ // Result: [Output Only] A list of NAT IP information. 
++ Result []*NatIpInfo `json:"result,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Result") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Result") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod NatIpInfoResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // Network: Represents a VPC Network resource. Networks connect + // resources to each other and to the internet. For more information, + // read Virtual Private Cloud (VPC) Network. +@@ -33740,7 +34422,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -33858,10 +34540,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -33881,7 +34562,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -34132,7 +34817,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. 
+ type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -34140,7 +34825,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -36487,10 +37172,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -40437,6 +41123,8 @@ type Operation struct { + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + ++ InstancesBulkInsertOperationMetadata *InstancesBulkInsertOperationMetadata `json:"instancesBulkInsertOperationMetadata,omitempty"` ++ + // Kind: [Output Only] Type of the resource. Always `compute#operation` + // for Operation resources. + Kind string `json:"kind,omitempty"` +@@ -45532,11 +46220,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -45551,6 +46243,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -45578,6 +46271,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -50577,8 +51271,6 @@ type ResourceStatus struct { + // instance.serviceIntegrationSpecs. + ServiceIntegrationStatuses map[string]ResourceStatusServiceIntegrationStatus `json:"serviceIntegrationStatuses,omitempty"` + +- UpcomingMaintenance *ResourceStatusUpcomingMaintenance `json:"upcomingMaintenance,omitempty"` +- + // ForceSendFields is a list of field names (e.g. "PhysicalHost") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -50720,34 +51412,6 @@ func (s *ResourceStatusServiceIntegrationStatusBackupDRStatus) MarshalJSON() ([] + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-type ResourceStatusUpcomingMaintenance struct { +- // CanReschedule: Indicates if the maintenance can be customer +- // triggered. See go/sf-ctm-design for more details +- CanReschedule bool `json:"canReschedule,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "CanReschedule") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "CanReschedule") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *ResourceStatusUpcomingMaintenance) MarshalJSON() ([]byte, error) { +- type NoMethod ResourceStatusUpcomingMaintenance +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- + // RolloutPolicy: A rollout policy configuration. + type RolloutPolicy struct { + // DefaultRolloutTime: An optional RFC3339 timestamp on or after which +@@ -51771,14 +52435,15 @@ type RouterBgpPeer struct { + // Bfd: BFD configuration for the BGP peering. + Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` + +- // CustomLearnedIpRanges: User-defined Custom Learned Route IP range +- // list for a BGP session. ++ // CustomLearnedIpRanges: A list of user-defined custom learned route IP ++ // address ranges for a BGP session. + CustomLearnedIpRanges []*RouterBgpPeerCustomLearnedIpRange `json:"customLearnedIpRanges,omitempty"` + +- // CustomLearnedRoutePriority: User-defined Custom Learned Route +- // Priority for a BGP session. This will be applied to all Custom +- // Learned Route ranges of the BGP session, if not given, google-managed +- // priority of 100 is used. ++ // CustomLearnedRoutePriority: The user-defined custom learned route ++ // priority for a BGP session. This value is applied to all custom ++ // learned route ranges for the session. You can choose a value from `0` ++ // to `65335`. If you don't provide a value, Google Cloud assigns a ++ // priority of `100` to the ranges. + CustomLearnedRoutePriority int64 `json:"customLearnedRoutePriority,omitempty"` + + // Enable: The status of the BGP peer connection. If set to FALSE, any +@@ -51978,10 +52643,10 @@ func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { + } + + type RouterBgpPeerCustomLearnedIpRange struct { +- // Range: The Custom Learned Route IP range. Must be a valid +- // CIDR-formatted prefix. If an IP is provided without a subnet mask, it +- // is interpreted as a /32 singular IP range for IPv4, and /128 for +- // IPv6. ++ // Range: The custom learned route IP address range. Must be a valid ++ // CIDR-formatted prefix. 
If an IP address is provided without a subnet ++ // mask, it is interpreted as, for IPv4, a `/32` singular IP address ++ // range, and, for IPv6, `/128`. + Range string `json:"range,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Range") to +@@ -52438,10 +53103,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -54461,13 +55125,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig) UnmarshalJSON(d + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -54505,6 +55173,7 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // Possible values: + // "DISABLED" + // "STANDARD" ++ // "STANDARD_WITH_GRAPHQL" + JsonParsing string `json:"jsonParsing,omitempty"` + + // Possible values: +@@ -54512,6 +55181,10 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // "VERBOSE" + LogLevel string `json:"logLevel,omitempty"` + ++ // UserIpRequestHeaders: An optional list of case-insensitive request ++ // header names to use for resolving the callers client IP address. ++ UserIpRequestHeaders []string `json:"userIpRequestHeaders,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -54867,7 +55540,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"RedirectSiteKey") to +@@ -54935,10 +55609,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -54962,7 +55637,8 @@ type SecurityPolicyRule struct { + EnableLogging bool `json:"enableLogging,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -55018,7 +55694,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // RedirectTarget: This must be specified for redirect actions. Cannot +@@ -55151,9 +55828,19 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + ++ // ExprOptions: The configuration options available when specifying a ++ // user defined CEVAL expression (i.e., 'expr'). ++ ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` ++ + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must +@@ -55306,6 +55993,73 @@ func (s *SecurityPolicyRuleMatcherConfigLayer4Config) MarshalJSON() ([]byte, err + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleMatcherExprOptions struct { ++ // RecaptchaOptions: reCAPTCHA configuration options to be applied for ++ // the rule. If the rule does not evaluate reCAPTCHA tokens, this field ++ // will have no effect. 
++ RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "RecaptchaOptions") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { ++ // ActionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA action-tokens. The provided site keys need to ++ // be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` ++ ++ // SessionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA session-tokens. The provided site keys need ++ // to be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // SecurityPolicyRuleNetworkMatcher: Represents a match condition that + // incoming network traffic is evaluated against. 
+ type SecurityPolicyRuleNetworkMatcher struct { +@@ -55593,7 +56347,8 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedActionRpcStatus: Specified gRPC response status for proxyless +@@ -55602,7 +56357,8 @@ type SecurityPolicyRuleRateLimitOptions struct { + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -55881,7 +56637,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // ClientTlsSettings: [Deprecated] TLS Settings for the backend service. +@@ -55899,8 +56655,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authentication") to +@@ -56097,7 +56852,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -56182,6 +56937,18 @@ type ServiceAttachment struct { + // the PSC service attachment. + PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. 
For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not +@@ -56918,8 +57685,7 @@ func (s *ServiceIntegrationSpecBackupDRSpec) MarshalJSON() ([]byte, error) { + + // SetCommonInstanceMetadataOperationMetadata: Encapsulates partial + // completion metadata for SetCommonInstanceMetadata. Will be propagated +-// on Operation.metadata as per go/partial-completion-api-clean. See +-// go/gce-aips/2822 for API council results. ++// on Operation.metadata. + type SetCommonInstanceMetadataOperationMetadata struct { + ClientOperationId string `json:"clientOperationId,omitempty"` + +@@ -57478,6 +58244,11 @@ type Snapshot struct { + // snapshot to a disk. + DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + ++ // EnableConfidentialCompute: Whether this snapshot is created from a ++ // confidential compute mode disk. [Output Only]: This field is not set ++ // by user, but from source disk. ++ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` ++ + // GuestFlush: [Input Only] Whether to attempt an application consistent + // snapshot by informing the OS to prepare for the snapshot process. + GuestFlush bool `json:"guestFlush,omitempty"` +@@ -57574,6 +58345,10 @@ type Snapshot struct { + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + ++ // SourceDiskForRecoveryCheckpoint: The source disk whose recovery ++ // checkpoint will be used to create this snapshot. ++ SourceDiskForRecoveryCheckpoint string `json:"sourceDiskForRecoveryCheckpoint,omitempty"` ++ + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given +@@ -59851,278 +60626,94 @@ func (s *Status) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// Subnetwork: Represents a Subnetwork resource. A subnetwork (also +-// known as a subnet) is a logical partition of a Virtual Private Cloud +-// network with one primary IP range and zero or more secondary IP +-// ranges. For more information, read Virtual Private Cloud (VPC) +-// Network. +-type Subnetwork struct { +- // AggregationInterval: Can only be specified if VPC flow logging for +- // this subnetwork is enabled. Sets the aggregation interval for +- // collecting flow logs. Increasing the interval time reduces the amount +- // of generated flow logs for long-lasting connections. Default is an +- // interval of 5 seconds per connection. Valid values: INTERVAL_5_SEC, +- // INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, +- // INTERVAL_15_MIN. +- // +- // Possible values: +- // "INTERVAL_10_MIN" +- // "INTERVAL_15_MIN" +- // "INTERVAL_1_MIN" +- // "INTERVAL_30_SEC" +- // "INTERVAL_5_MIN" +- // "INTERVAL_5_SEC" +- AggregationInterval string `json:"aggregationInterval,omitempty"` +- +- // AllowSubnetCidrRoutesOverlap: Whether this subnetwork's ranges can +- // conflict with existing static routes. 
Setting this to true allows +- // this subnetwork's primary and secondary ranges to overlap with (and +- // contain) static routes that have already been configured on the +- // corresponding network. For example if a static route has range +- // 10.1.0.0/16, a subnet range 10.0.0.0/8 could only be created if +- // allow_conflicting_routes=true. Overlapping is only allowed on +- // subnetwork operations; routes whose ranges conflict with this +- // subnetwork's ranges won't be allowed unless +- // route.allow_conflicting_subnetworks is set to true. Typically packets +- // destined to IPs within the subnetwork (which may contain +- // private/sensitive data) are prevented from leaving the virtual +- // network. Setting this field to true will disable this feature. The +- // default value is false and applies to all existing subnetworks and +- // automatically created subnetworks. This field cannot be set to true +- // at resource creation time. +- AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` +- ++// StoragePool: Represents a zonal storage pool resource. ++type StoragePool struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this +- // property when you create the resource. This field can be set only at +- // resource creation time. ++ // property when you create the resource. + Description string `json:"description,omitempty"` + +- // EnableFlowLogs: Whether to enable flow logging for this subnetwork. +- // If this field is not explicitly set, it will not appear in get +- // listings. If not set the default behavior is determined by the org +- // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. +- EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` +- +- // EnableL2: Enables Layer2 communication on the subnetwork. +- EnableL2 bool `json:"enableL2,omitempty"` +- +- // EnablePrivateV6Access: Deprecated in favor of enable in +- // PrivateIpv6GoogleAccess. Whether the VMs in this subnet can directly +- // access Google services via internal IPv6 addresses. This field can be +- // both set at resource creation time and updated using patch. +- EnablePrivateV6Access bool `json:"enablePrivateV6Access,omitempty"` +- +- // ExternalIpv6Prefix: The external IPv6 address range that is owned by +- // this subnetwork. +- ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` +- +- // Fingerprint: Fingerprint of this resource. A hash of the contents +- // stored in this object. This field is used in optimistic locking. This +- // field will be ignored when inserting a Subnetwork. An up-to-date +- // fingerprint must be provided in order to update the Subnetwork, +- // otherwise the request will fail with error 412 conditionNotMet. To +- // see the latest fingerprint, make a get() request to retrieve a +- // Subnetwork. +- Fingerprint string `json:"fingerprint,omitempty"` +- +- // FlowSampling: Can only be specified if VPC flow logging for this +- // subnetwork is enabled. The value of the field must be in [0, 1]. Set +- // the sampling rate of VPC flow logs within the subnetwork where 1.0 +- // means all collected logs are reported and 0.0 means no logs are +- // reported. 
Default is 0.5 unless otherwise specified by the org +- // policy, which means half of all collected logs are reported. +- FlowSampling float64 `json:"flowSampling,omitempty"` +- +- // GatewayAddress: [Output Only] The gateway address for default routes +- // to reach destination addresses outside this subnetwork. +- GatewayAddress string `json:"gatewayAddress,omitempty"` +- + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + +- // InternalIpv6Prefix: [Output Only] The internal IPv6 address range +- // that is assigned to this subnetwork. +- InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` +- +- // IpCidrRange: The range of internal addresses that are owned by this +- // subnetwork. Provide this property when you create the subnetwork. For +- // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and +- // non-overlapping within a network. Only IPv4 is supported. This field +- // is set at resource creation time. The range can be any range listed +- // in the Valid ranges list. The range can be expanded after creation +- // using expandIpCidrRange. +- IpCidrRange string `json:"ipCidrRange,omitempty"` +- +- // Ipv6AccessType: The access type of IPv6 address this subnet holds. +- // It's immutable and can only be specified during creation or the first +- // time the subnet is updated into IPV4_IPV6 dual stack. +- // +- // Possible values: +- // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses +- // that are accessible via the Internet, as well as the VPC network. +- // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses +- // that are only accessible over the VPC network. +- Ipv6AccessType string `json:"ipv6AccessType,omitempty"` +- +- // Ipv6CidrRange: [Output Only] This field is for internal use. +- Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` +- +- // Kind: [Output Only] Type of the resource. Always compute#subnetwork +- // for Subnetwork resources. ++ // Kind: [Output Only] Type of the resource. Always compute#storagePool ++ // for storage pools. + Kind string `json:"kind,omitempty"` + +- // LogConfig: This field denotes the VPC flow logging options for this +- // subnetwork. If logging is enabled, logs are exported to Cloud +- // Logging. +- LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // storage pool, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve a storage pool. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` + +- // Metadata: Can only be specified if VPC flow logging for this +- // subnetwork is enabled. Configures whether metadata fields should be +- // added to the reported VPC flow logs. Options are +- // INCLUDE_ALL_METADATA, EXCLUDE_ALL_METADATA, and CUSTOM_METADATA. +- // Default is EXCLUDE_ALL_METADATA. +- // +- // Possible values: +- // "EXCLUDE_ALL_METADATA" +- // "INCLUDE_ALL_METADATA" +- Metadata string `json:"metadata,omitempty"` ++ // Labels: Labels to apply to this storage pool. 
These can be later ++ // modified by the setLabels method. ++ Labels map[string]string `json:"labels,omitempty"` + +- // Name: The name of the resource, provided by the client when initially +- // creating the resource. The name must be 1-63 characters long, and +- // comply with RFC1035. Specifically, the name must be 1-63 characters +- // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` +- // which means the first character must be a lowercase letter, and all +- // following characters must be a dash, lowercase letter, or digit, +- // except the last character, which cannot be a dash. ++ // Name: Name of the resource. Provided by the client when the resource ++ // is created. The name must be 1-63 characters long, and comply with ++ // RFC1035. Specifically, the name must be 1-63 characters long and ++ // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means ++ // the first character must be a lowercase letter, and all following ++ // characters must be a dash, lowercase letter, or digit, except the ++ // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + +- // Network: The URL of the network to which this subnetwork belongs, +- // provided by the client when initially creating the subnetwork. This +- // field can be set only at resource creation time. +- Network string `json:"network,omitempty"` +- +- // PrivateIpGoogleAccess: Whether the VMs in this subnet can access +- // Google services without assigned external IP addresses. This field +- // can be both set at resource creation time and updated using +- // setPrivateIpGoogleAccess. +- PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` +- +- // PrivateIpv6GoogleAccess: This field is for internal use. This field +- // can be both set at resource creation time and updated using patch. +- // +- // Possible values: +- // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from +- // Google services. +- // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private +- // IPv6 access to/from Google services. +- // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 +- // access from VMs in this subnet to Google services. +- PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` +- +- // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. +- // +- // Possible values: +- // "AGGREGATE" - Subnetwork used to aggregate multiple private +- // subnetworks. +- // "CLOUD_EXTENSION" - Subnetworks created for Cloud Extension +- // Machines. +- // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal +- // HTTP(S) Load Balancing. +- // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +- // HTTP(S) Load Balancing. +- // "PRIVATE" - Regular user created or automatically created subnet. +- // "PRIVATE_NAT" - Subnetwork used as source range for Private NAT +- // Gateways. +- // "PRIVATE_RFC_1918" - Regular user created or automatically created +- // subnet. +- // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service +- // Connect in the producer network. 
+- // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional +- // Internal/External HTTP(S) Load Balancing. +- Purpose string `json:"purpose,omitempty"` +- +- // Region: URL of the region where the Subnetwork resides. This field +- // can be set only at resource creation time. +- Region string `json:"region,omitempty"` +- +- // ReservedInternalRange: The URL of the reserved internal range. +- ReservedInternalRange string `json:"reservedInternalRange,omitempty"` +- +- // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one +- // that is ready to be promoted to ACTIVE or is currently draining. This +- // field can be updated with a patch request. +- // +- // Possible values: +- // "ACTIVE" - The ACTIVE subnet that is currently used. +- // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. +- Role string `json:"role,omitempty"` ++ // ProvisionedIops: Provsioned IOPS of the storage pool. ++ ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + +- // SecondaryIpRanges: An array of configurations for secondary IP ranges +- // for VM instances contained in this subnetwork. The primary IP of such +- // VM must belong to the primary ipCidrRange of the subnetwork. The +- // alias IPs may belong to either primary or secondary ranges. This +- // field can be updated with a patch request. +- SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` ++ // ResourceStatus: [Output Only] Status information for the storage pool ++ // resource. ++ ResourceStatus *StoragePoolResourceStatus `json:"resourceStatus,omitempty"` + +- // SelfLink: [Output Only] Server-defined URL for the resource. ++ // SelfLink: [Output Only] Server-defined fully-qualified URL for this ++ // resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SelfLinkWithId: [Output Only] Server-defined URL for this resource +- // with the resource id. ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource's ++ // resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + +- // StackType: The stack type for the subnet. If set to IPV4_ONLY, new +- // VMs in the subnet are assigned IPv4 addresses only. If set to +- // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 +- // addresses. If not specified, IPV4_ONLY is used. This field can be +- // both set at resource creation time and updated using patch. ++ // SizeGb: Size, in GiB, of the storage pool. ++ SizeGb int64 `json:"sizeGb,omitempty,string"` ++ ++ // State: [Output Only] The status of storage pool creation. - CREATING: ++ // Storage pool is provisioning. storagePool. - FAILED: Storage pool ++ // creation failed. - READY: Storage pool is ready for use. - DELETING: ++ // Storage pool is deleting. + // + // Possible values: +- // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 +- // addresses. +- // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 +- // addresses. +- StackType string `json:"stackType,omitempty"` ++ // "CREATING" - StoragePool is provisioning ++ // "DELETING" - StoragePool is deleting. ++ // "FAILED" - StoragePool creation failed. ++ // "READY" - StoragePool is ready for use. 
++ State string `json:"state,omitempty"` + +- // State: [Output Only] The state of the subnetwork, which can be one of +- // the following values: READY: Subnetwork is created and ready to use +- // DRAINING: only applicable to subnetworks that have the purpose set to +- // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the +- // load balancer are being drained. A subnetwork that is draining cannot +- // be used or modified until it reaches a status of READY ++ // Type: Type of the storage pool + // + // Possible values: +- // "DRAINING" - Subnetwork is being drained. +- // "READY" - Subnetwork is ready for use. +- State string `json:"state,omitempty"` ++ // "SSD" ++ // "UNSPECIFIED" ++ Type string `json:"type,omitempty"` + +- // Vlans: A repeated field indicating the VLAN IDs supported on this +- // subnetwork. During Subnet creation, specifying vlan is valid only if +- // enable_l2 is true. During Subnet Update, specifying vlan is allowed +- // only for l2 enabled subnets. Restricted to only one VLAN. +- Vlans []int64 `json:"vlans,omitempty"` ++ // Zone: [Output Only] URL of the zone where the storage pool resides. ++ // You must specify this field as part of the HTTP request URL. It is ++ // not settable as a field in the request body. ++ Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + +- // ForceSendFields is a list of field names (e.g. "AggregationInterval") ++ // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -60130,7 +60721,7 @@ type Subnetwork struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "AggregationInterval") to ++ // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the +@@ -60140,36 +60731,25 @@ type Subnetwork struct { + NullFields []string `json:"-"` + } + +-func (s *Subnetwork) MarshalJSON() ([]byte, error) { +- type NoMethod Subnetwork ++func (s *StoragePool) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePool + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-func (s *Subnetwork) UnmarshalJSON(data []byte) error { +- type NoMethod Subnetwork +- var s1 struct { +- FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` +- *NoMethod +- } +- s1.NoMethod = (*NoMethod)(s) +- if err := json.Unmarshal(data, &s1); err != nil { +- return err +- } +- s.FlowSampling = float64(s1.FlowSampling) +- return nil +-} ++type StoragePoolAggregatedList struct { ++ Etag string `json:"etag,omitempty"` + +-type SubnetworkAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + +- // Items: A list of SubnetworksScopedList resources. +- Items map[string]SubnetworksScopedList `json:"items,omitempty"` ++ // Items: A list of StoragePoolsScopedList resources. 
++ Items map[string]StoragePoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always +- // compute#subnetworkAggregatedList for aggregated lists of subnetworks. ++ // compute#storagePoolAggregatedList for aggregated lists of storage ++ // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next +@@ -60187,13 +60767,13 @@ type SubnetworkAggregatedList struct { + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` ++ Warning *StoragePoolAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + +- // ForceSendFields is a list of field names (e.g. "Id") to ++ // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -60201,7 +60781,7 @@ type SubnetworkAggregatedList struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Id") to include in API ++ // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as +@@ -60210,15 +60790,942 @@ type SubnetworkAggregatedList struct { + NullFields []string `json:"-"` + } + +-func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { +- type NoMethod SubnetworkAggregatedList ++func (s *StoragePoolAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// SubnetworkAggregatedListWarning: [Output Only] Informational warning ++// StoragePoolAggregatedListWarning: [Output Only] Informational warning + // message. +-type SubnetworkAggregatedListWarning struct { ++type StoragePoolAggregatedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. 
++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolAggregatedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. 
++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolAggregatedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolAggregatedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolAggregatedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolList: A list of StoragePool resources. ++type StoragePoolList struct { ++ Etag string `json:"etag,omitempty"` ++ ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of StoragePool resources. ++ Items []*StoragePool `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always compute#storagePoolList ++ // for lists of storagePools. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. end_interface: ++ // MixerListResponseWithEtagBuilder ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. 
++ Warning *StoragePoolListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Etag") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Etag") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolListWarning: [Output Only] Informational warning message. ++type StoragePoolListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. 
++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. 
"Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolResourceStatus: [Output Only] Contains output only fields. ++type StoragePoolResourceStatus struct { ++ // AggregateDiskProvisionedIops: [Output Only] Sum of all the disk' ++ // provisioned IOPS. ++ AggregateDiskProvisionedIops int64 `json:"aggregateDiskProvisionedIops,omitempty,string"` ++ ++ // AggregateDiskSizeGb: [Output Only] Sum of all the capacity ++ // provisioned in disks in this storage pool. A disk's provisioned ++ // capacity is the same as its total capacity. ++ AggregateDiskSizeGb int64 `json:"aggregateDiskSizeGb,omitempty,string"` ++ ++ // LastResizeTimestamp: [Output Only] Timestamp of the last successful ++ // resize in RFC3339 text format. ++ LastResizeTimestamp string `json:"lastResizeTimestamp,omitempty"` ++ ++ // MaxAggregateDiskSizeGb: [Output Only] Maximum allowed aggregate disk ++ // size in gigabytes. ++ MaxAggregateDiskSizeGb int64 `json:"maxAggregateDiskSizeGb,omitempty,string"` ++ ++ // NumberOfDisks: [Output Only] Number of disks used. ++ NumberOfDisks int64 `json:"numberOfDisks,omitempty,string"` ++ ++ // UsedBytes: [Output Only] Space used by data stored in disks within ++ // the storage pool (in bytes). ++ UsedBytes int64 `json:"usedBytes,omitempty,string"` ++ ++ // UsedReducedBytes: [Output Only] Space used by compressed and deduped ++ // data stored in disks within the storage pool (in bytes). ++ UsedReducedBytes int64 `json:"usedReducedBytes,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AggregateDiskProvisionedIops") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. ++ // "AggregateDiskProvisionedIops") to include in API requests with the ++ // JSON null value. By default, fields with empty values are omitted ++ // from API requests. However, any field with an empty value appearing ++ // in NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolsScopedList struct { ++ // StoragePools: [Output Only] A list of storage pool contained in this ++ // scope. ++ StoragePools []*StoragePool `json:"storagePools,omitempty"` ++ ++ // Warning: [Output Only] Informational warning which replaces the list ++ // of storage pool when the list is empty. ++ Warning *StoragePoolsScopedListWarning `json:"warning,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "StoragePools") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "StoragePools") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedList) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// StoragePoolsScopedListWarning: [Output Only] Informational warning ++// which replaces the list of storage pool when the list is empty. ++type StoragePoolsScopedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*StoragePoolsScopedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type StoragePoolsScopedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *StoragePoolsScopedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod StoragePoolsScopedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// Subnetwork: Represents a Subnetwork resource. A subnetwork (also ++// known as a subnet) is a logical partition of a Virtual Private Cloud ++// network with one primary IP range and zero or more secondary IP ++// ranges. For more information, read Virtual Private Cloud (VPC) ++// Network. ++type Subnetwork struct { ++ // AggregationInterval: Can only be specified if VPC flow logging for ++ // this subnetwork is enabled. Sets the aggregation interval for ++ // collecting flow logs. Increasing the interval time reduces the amount ++ // of generated flow logs for long-lasting connections. Default is an ++ // interval of 5 seconds per connection. Valid values: INTERVAL_5_SEC, ++ // INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, ++ // INTERVAL_15_MIN. ++ // ++ // Possible values: ++ // "INTERVAL_10_MIN" ++ // "INTERVAL_15_MIN" ++ // "INTERVAL_1_MIN" ++ // "INTERVAL_30_SEC" ++ // "INTERVAL_5_MIN" ++ // "INTERVAL_5_SEC" ++ AggregationInterval string `json:"aggregationInterval,omitempty"` ++ ++ // AllowSubnetCidrRoutesOverlap: Whether this subnetwork's ranges can ++ // conflict with existing static routes. Setting this to true allows ++ // this subnetwork's primary and secondary ranges to overlap with (and ++ // contain) static routes that have already been configured on the ++ // corresponding network. For example if a static route has range ++ // 10.1.0.0/16, a subnet range 10.0.0.0/8 could only be created if ++ // allow_conflicting_routes=true. Overlapping is only allowed on ++ // subnetwork operations; routes whose ranges conflict with this ++ // subnetwork's ranges won't be allowed unless ++ // route.allow_conflicting_subnetworks is set to true. 
Typically packets ++ // destined to IPs within the subnetwork (which may contain ++ // private/sensitive data) are prevented from leaving the virtual ++ // network. Setting this field to true will disable this feature. The ++ // default value is false and applies to all existing subnetworks and ++ // automatically created subnetworks. This field cannot be set to true ++ // at resource creation time. ++ AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: An optional description of this resource. Provide this ++ // property when you create the resource. This field can be set only at ++ // resource creation time. ++ Description string `json:"description,omitempty"` ++ ++ // EnableFlowLogs: Whether to enable flow logging for this subnetwork. ++ // If this field is not explicitly set, it will not appear in get ++ // listings. If not set the default behavior is determined by the org ++ // policy, if there is no org policy specified, then it will default to ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. ++ EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` ++ ++ // EnableL2: Enables Layer2 communication on the subnetwork. ++ EnableL2 bool `json:"enableL2,omitempty"` ++ ++ // EnablePrivateV6Access: Deprecated in favor of enable in ++ // PrivateIpv6GoogleAccess. Whether the VMs in this subnet can directly ++ // access Google services via internal IPv6 addresses. This field can be ++ // both set at resource creation time and updated using patch. ++ EnablePrivateV6Access bool `json:"enablePrivateV6Access,omitempty"` ++ ++ // ExternalIpv6Prefix: The external IPv6 address range that is owned by ++ // this subnetwork. ++ ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` ++ ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. This ++ // field will be ignored when inserting a Subnetwork. An up-to-date ++ // fingerprint must be provided in order to update the Subnetwork, ++ // otherwise the request will fail with error 412 conditionNotMet. To ++ // see the latest fingerprint, make a get() request to retrieve a ++ // Subnetwork. ++ Fingerprint string `json:"fingerprint,omitempty"` ++ ++ // FlowSampling: Can only be specified if VPC flow logging for this ++ // subnetwork is enabled. The value of the field must be in [0, 1]. Set ++ // the sampling rate of VPC flow logs within the subnetwork where 1.0 ++ // means all collected logs are reported and 0.0 means no logs are ++ // reported. Default is 0.5 unless otherwise specified by the org ++ // policy, which means half of all collected logs are reported. ++ FlowSampling float64 `json:"flowSampling,omitempty"` ++ ++ // GatewayAddress: [Output Only] The gateway address for default routes ++ // to reach destination addresses outside this subnetwork. ++ GatewayAddress string `json:"gatewayAddress,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // InternalIpv6Prefix: [Output Only] The internal IPv6 address range ++ // that is assigned to this subnetwork. 
++ InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` ++ ++ // IpCidrRange: The range of internal addresses that are owned by this ++ // subnetwork. Provide this property when you create the subnetwork. For ++ // example, 10.0.0.0/8 or 100.64.0.0/10. Ranges must be unique and ++ // non-overlapping within a network. Only IPv4 is supported. This field ++ // is set at resource creation time. The range can be any range listed ++ // in the Valid ranges list. The range can be expanded after creation ++ // using expandIpCidrRange. ++ IpCidrRange string `json:"ipCidrRange,omitempty"` ++ ++ // Ipv6AccessType: The access type of IPv6 address this subnet holds. ++ // It's immutable and can only be specified during creation or the first ++ // time the subnet is updated into IPV4_IPV6 dual stack. ++ // ++ // Possible values: ++ // "EXTERNAL" - VMs on this subnet will be assigned IPv6 addresses ++ // that are accessible via the Internet, as well as the VPC network. ++ // "INTERNAL" - VMs on this subnet will be assigned IPv6 addresses ++ // that are only accessible over the VPC network. ++ Ipv6AccessType string `json:"ipv6AccessType,omitempty"` ++ ++ // Ipv6CidrRange: [Output Only] This field is for internal use. ++ Ipv6CidrRange string `json:"ipv6CidrRange,omitempty"` ++ ++ // Kind: [Output Only] Type of the resource. Always compute#subnetwork ++ // for Subnetwork resources. ++ Kind string `json:"kind,omitempty"` ++ ++ // LogConfig: This field denotes the VPC flow logging options for this ++ // subnetwork. If logging is enabled, logs are exported to Cloud ++ // Logging. ++ LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` ++ ++ // Metadata: Can only be specified if VPC flow logging for this ++ // subnetwork is enabled. Configures whether metadata fields should be ++ // added to the reported VPC flow logs. Options are ++ // INCLUDE_ALL_METADATA, EXCLUDE_ALL_METADATA, and CUSTOM_METADATA. ++ // Default is EXCLUDE_ALL_METADATA. ++ // ++ // Possible values: ++ // "EXCLUDE_ALL_METADATA" ++ // "INCLUDE_ALL_METADATA" ++ Metadata string `json:"metadata,omitempty"` ++ ++ // Name: The name of the resource, provided by the client when initially ++ // creating the resource. The name must be 1-63 characters long, and ++ // comply with RFC1035. Specifically, the name must be 1-63 characters ++ // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` ++ // which means the first character must be a lowercase letter, and all ++ // following characters must be a dash, lowercase letter, or digit, ++ // except the last character, which cannot be a dash. ++ Name string `json:"name,omitempty"` ++ ++ // Network: The URL of the network to which this subnetwork belongs, ++ // provided by the client when initially creating the subnetwork. This ++ // field can be set only at resource creation time. ++ Network string `json:"network,omitempty"` ++ ++ // PrivateIpGoogleAccess: Whether the VMs in this subnet can access ++ // Google services without assigned external IP addresses. This field ++ // can be both set at resource creation time and updated using ++ // setPrivateIpGoogleAccess. ++ PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` ++ ++ // PrivateIpv6GoogleAccess: This field is for internal use. This field ++ // can be both set at resource creation time and updated using patch. ++ // ++ // Possible values: ++ // "DISABLE_GOOGLE_ACCESS" - Disable private IPv6 access to/from ++ // Google services. 
++ // "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE" - Bidirectional private ++ // IPv6 access to/from Google services. ++ // "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" - Outbound private IPv6 ++ // access from VMs in this subnet to Google services. ++ PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` ++ ++ // Purpose: The purpose of the resource. This field can be either ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. ++ // ++ // Possible values: ++ // "AGGREGATE" - Subnetwork used to aggregate multiple private ++ // subnetworks. ++ // "CLOUD_EXTENSION" - Subnetworks created for Cloud Extension ++ // Machines. ++ // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal ++ // HTTP(S) Load Balancing. ++ // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal ++ // HTTP(S) Load Balancing. ++ // "PRIVATE" - Regular user created or automatically created subnet. ++ // "PRIVATE_NAT" - Subnetwork used as source range for Private NAT ++ // Gateways. ++ // "PRIVATE_RFC_1918" - Regular user created or automatically created ++ // subnet. ++ // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service ++ // Connect in the producer network. ++ // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional ++ // Internal/External HTTP(S) Load Balancing. ++ Purpose string `json:"purpose,omitempty"` ++ ++ // Region: URL of the region where the Subnetwork resides. This field ++ // can be set only at resource creation time. ++ Region string `json:"region,omitempty"` ++ ++ // ReservedInternalRange: The URL of the reserved internal range. ++ ReservedInternalRange string `json:"reservedInternalRange,omitempty"` ++ ++ // Role: The role of subnetwork. Currently, this field is only used when ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one ++ // that is ready to be promoted to ACTIVE or is currently draining. This ++ // field can be updated with a patch request. ++ // ++ // Possible values: ++ // "ACTIVE" - The ACTIVE subnet that is currently used. ++ // "BACKUP" - The BACKUP subnet that could be promoted to ACTIVE. ++ Role string `json:"role,omitempty"` ++ ++ // SecondaryIpRanges: An array of configurations for secondary IP ranges ++ // for VM instances contained in this subnetwork. The primary IP of such ++ // VM must belong to the primary ipCidrRange of the subnetwork. The ++ // alias IPs may belong to either primary or secondary ranges. This ++ // field can be updated with a patch request. 
++ SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource ++ // with the resource id. ++ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` ++ ++ // StackType: The stack type for the subnet. If set to IPV4_ONLY, new ++ // VMs in the subnet are assigned IPv4 addresses only. If set to ++ // IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 ++ // addresses. If not specified, IPV4_ONLY is used. This field can be ++ // both set at resource creation time and updated using patch. ++ // ++ // Possible values: ++ // "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6 ++ // addresses. ++ // "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 ++ // addresses. ++ StackType string `json:"stackType,omitempty"` ++ ++ // State: [Output Only] The state of the subnetwork, which can be one of ++ // the following values: READY: Subnetwork is created and ready to use ++ // DRAINING: only applicable to subnetworks that have the purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the ++ // load balancer are being drained. A subnetwork that is draining cannot ++ // be used or modified until it reaches a status of READY ++ // ++ // Possible values: ++ // "DRAINING" - Subnetwork is being drained. ++ // "READY" - Subnetwork is ready for use. ++ State string `json:"state,omitempty"` ++ ++ // Vlans: A repeated field indicating the VLAN IDs supported on this ++ // subnetwork. During Subnet creation, specifying vlan is valid only if ++ // enable_l2 is true. During Subnet Update, specifying vlan is allowed ++ // only for l2 enabled subnets. Restricted to only one VLAN. ++ Vlans []int64 `json:"vlans,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "AggregationInterval") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AggregationInterval") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *Subnetwork) MarshalJSON() ([]byte, error) { ++ type NoMethod Subnetwork ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++func (s *Subnetwork) UnmarshalJSON(data []byte) error { ++ type NoMethod Subnetwork ++ var s1 struct { ++ FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` ++ *NoMethod ++ } ++ s1.NoMethod = (*NoMethod)(s) ++ if err := json.Unmarshal(data, &s1); err != nil { ++ return err ++ } ++ s.FlowSampling = float64(s1.FlowSampling) ++ return nil ++} ++ ++type SubnetworkAggregatedList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of SubnetworksScopedList resources. ++ Items map[string]SubnetworksScopedList `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#subnetworkAggregatedList for aggregated lists of subnetworks. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod SubnetworkAggregatedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// SubnetworkAggregatedListWarning: [Output Only] Informational warning ++// message. ++type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. +@@ -60563,6 +62070,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. 
If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -61575,6 +63084,15 @@ type TargetHttpProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -62201,7 +63719,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -62324,7 +63844,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -62359,6 +63881,15 @@ type TargetHttpsProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -62421,9 +63952,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. 
serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -64425,7 +65958,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -64523,7 +66058,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -67662,12 +69199,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. 
+ // + // Possible values: + // "AGGREGATE" - Subnetwork used to aggregate multiple private +@@ -67690,9 +69235,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -68428,6 +69973,7 @@ type VpnGateway struct { + // Possible values: + // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. + // "IPV4_ONLY" - Enable VPN gateway with only IPv4 protocol. ++ // "IPV6_ONLY" - Enable VPN gateway with only IPv6 protocol. + StackType string `json:"stackType,omitempty"` + + // VpnInterfaces: The list of VPN interfaces associated with this VPN +@@ -68931,7 +70477,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -68964,8 +70510,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -93130,9 +94676,7 @@ func (r *GlobalAddressesService) GetOwnerInstance(project string) *GlobalAddress + return c + } + +-// IpAddress sets the optional parameter "ipAddress": The ip_address +-// could be external IPv4, or internal IPv4 within IPv6 form of +-// virtual_network_id with internal IPv4. IPv6 is not supported yet. ++// IpAddress sets the optional parameter "ipAddress": The VM IP address. + func (c *GlobalAddressesGetOwnerInstanceCall) IpAddress(ipAddress string) *GlobalAddressesGetOwnerInstanceCall { + c.urlParams_.Set("ipAddress", ipAddress) + return c +@@ -93246,7 +94790,7 @@ func (c *GlobalAddressesGetOwnerInstanceCall) Do(opts ...googleapi.CallOption) ( + // ], + // "parameters": { + // "ipAddress": { +- // "description": "The ip_address could be external IPv4, or internal IPv4 within IPv6 form of virtual_network_id with internal IPv4. IPv6 is not supported yet.", ++ // "description": "The VM IP address.", + // "location": "query", + // "type": "string" + // }, +@@ -111205,6 +112749,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use instanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -123543,32 +125088,33 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.instances.setServiceAccount": ++// method id "compute.instances.setSecurityPolicy": + +-type InstancesSetServiceAccountCall struct { ++type InstancesSetSecurityPolicyCall struct { + s *Service + project string + zone string + instance string +- instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetServiceAccount: Sets the service account on the instance. For more +-// information, read Changing the service account and access scopes for +-// an instance. ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified instance. For more information, see Google Cloud Armor ++// Overview + // +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { +- c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instance: Name of the Instance resource to which the security ++// policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - zone: Name of the zone scoping this request. ++func (r *InstancesService) SetSecurityPolicy(project string, zone string, instance string, instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest) *InstancesSetSecurityPolicyCall { ++ c := &InstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancessetserviceaccountrequest = instancessetserviceaccountrequest ++ c.instancessetsecuritypolicyrequest = instancessetsecuritypolicyrequest + return c + } + +@@ -123583,7 +125129,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) RequestId(requestId string) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123591,7 +125137,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123599,21 +125145,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Context(ctx context.Context) *InstancesSetSecurityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetServiceAccountCall) Header() http.Header { ++func (c *InstancesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123621,14 +125167,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetsecuritypolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123643,14 +125189,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setServiceAccount" call. ++// Do executes the "compute.instances.setSecurityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123681,10 +125227,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + } + return ret, nil + // { +- // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "httpMethod": "POST", +- // "id": "compute.instances.setServiceAccount", ++ // "id": "compute.instances.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -123692,9 +125238,8 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -123711,16 +125256,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "request": { +- // "$ref": "InstancesSetServiceAccountRequest" ++ // "$ref": "InstancesSetSecurityPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -123733,33 +125278,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.instances.setShieldedInstanceIntegrityPolicy": ++// method id "compute.instances.setServiceAccount": + +-type InstancesSetShieldedInstanceIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetServiceAccountCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +-// integrity policy for an instance. You can only use this method on a +-// running instance. This method supports PATCH semantics and uses the +-// JSON merge patch format and processing rules. ++// SetServiceAccount: Sets the service account on the instance. For more ++// information, read Changing the service account and access scopes for ++// an instance. + // +-// - instance: Name or id of the instance scoping this request. ++// - instance: Name of the instance resource to start. 
+ // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { +- c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { ++ c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy ++ c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c + } + +@@ -123774,7 +125318,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123782,7 +125326,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123790,21 +125334,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetServiceAccountCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123812,16 +125356,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -123834,14 +125378,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. ++// Do executes the "compute.instances.setServiceAccount" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123872,10 +125416,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.setServiceAccount", + // "parameterOrder": [ + // "project", + // "zone", +@@ -123883,7 +125427,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // ], + // "parameters": { + // "instance": { +- // "description": "Name or id of the instance scoping this request.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -123909,9 +125453,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { +- // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // "$ref": "InstancesSetServiceAccountRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -123924,33 +125468,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + + } + +-// method id "compute.instances.setShieldedVmIntegrityPolicy": ++// method id "compute.instances.setShieldedInstanceIntegrityPolicy": + +-type InstancesSetShieldedVmIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedInstanceIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +-// for a VM instance. You can only use this method on a running VM +-// instance. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance ++// integrity policy for an instance. You can only use this method on a ++// running instance. This method supports PATCH semantics and uses the ++// JSON merge patch format and processing rules. + // +-// - instance: Name of the instance scoping this request. ++// - instance: Name or id of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { +- c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++ c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c + } + +@@ -123965,7 +125509,7 @@ func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -123973,7 +125517,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123981,21 +125525,21 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124003,14 +125547,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -124025,14 +125569,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124063,10 +125607,10 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedVmIntegrityPolicy", ++ // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124074,7 +125618,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance scoping this request.", ++ // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -124100,9 +125644,9 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "request": { +- // "$ref": "ShieldedVmIntegrityPolicy" ++ // "$ref": "ShieldedInstanceIntegrityPolicy" + // }, + // "response": { + // "$ref": "Operation" +@@ -124115,31 +125659,33 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt + + } + +-// method id "compute.instances.setTags": ++// method id "compute.instances.setShieldedVmIntegrityPolicy": + +-type InstancesSetTagsCall struct { +- s *Service +- project string +- zone string +- instance string +- tags *Tags +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedVmIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetTags: Sets network tags for the specified instance to the data +-// included in the request. ++// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy ++// for a VM instance. You can only use this method on a running VM ++// instance. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. + // + // - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { +- c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.tags = tags ++ c.shieldedvmintegritypolicy = shieldedvmintegritypolicy + return c + } + +@@ -124154,7 +125700,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124162,7 +125708,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124170,21 +125716,21 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetTagsCall) Header() http.Header { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124192,16 +125738,16 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } +@@ -124214,14 +125760,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setTags" call. ++// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124252,10 +125798,10 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + } + return ret, nil + // { +- // "description": "Sets network tags for the specified instance to the data included in the request.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", +- // "httpMethod": "POST", +- // "id": "compute.instances.setTags", ++ // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", ++ // "httpMethod": "PATCH", ++ // "id": "compute.instances.setShieldedVmIntegrityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124289,9 +125835,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", + // "request": { +- // "$ref": "Tags" ++ // "$ref": "ShieldedVmIntegrityPolicy" + // }, + // "response": { + // "$ref": "Operation" +@@ -124304,29 +125850,31 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err + + } + +-// method id "compute.instances.simulateMaintenanceEvent": ++// method id "compute.instances.setTags": + +-type InstancesSimulateMaintenanceEventCall struct { ++type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string ++ tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. +-// For more information, see Simulate a host maintenance event. ++// SetTags: Sets network tags for the specified instance to the data ++// included in the request. + // + // - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { +- c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { ++ c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance ++ c.tags = tags + return c + } + +@@ -124341,23 +125889,15 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// WithExtendedNotifications sets the optional parameter +-// "withExtendedNotifications": Determines whether the customers receive +-// notifications before migration. Only applicable to SF vms. +-func (c *InstancesSimulateMaintenanceEventCall) WithExtendedNotifications(withExtendedNotifications bool) *InstancesSimulateMaintenanceEventCall { +- c.urlParams_.Set("withExtendedNotifications", fmt.Sprint(withExtendedNotifications)) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124365,21 +125905,21 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { ++func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { ++func (c *InstancesSetTagsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124387,9 +125927,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124404,14 +125949,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.simulateMaintenanceEvent" call. ++// Do executes the "compute.instances.setTags" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. 
Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124442,10 +125987,10 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "description": "Sets network tags for the specified instance to the data included in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "httpMethod": "POST", +- // "id": "compute.instances.simulateMaintenanceEvent", ++ // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124471,11 +126016,6 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "withExtendedNotifications": { +- // "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -124484,7 +126024,10 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", ++ // "request": { ++ // "$ref": "Tags" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -124496,9 +126039,9 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.instances.start": ++// method id "compute.instances.simulateMaintenanceEvent": + +-type InstancesStartCall struct { ++type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string +@@ -124508,14 +126051,14 @@ type InstancesStartCall struct { + header_ http.Header + } + +-// Start: Starts an instance that was stopped using the instances().stop +-// method. For more information, see Restart an instance. ++// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. ++// For more information, see Simulate a host maintenance event. + // +-// - instance: Name of the instance resource to start. ++// - instance: Name of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { +- c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { ++ c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +@@ -124533,15 +126076,23 @@ func (r *InstancesService) Start(project string, zone string, instance string) * + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// WithExtendedNotifications sets the optional parameter ++// "withExtendedNotifications": Determines whether the customers receive ++// notifications before migration. Only applicable to SF vms. ++func (c *InstancesSimulateMaintenanceEventCall) WithExtendedNotifications(withExtendedNotifications bool) *InstancesSimulateMaintenanceEventCall { ++ c.urlParams_.Set("withExtendedNotifications", fmt.Sprint(withExtendedNotifications)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124549,21 +126100,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesStartCall) Header() http.Header { ++func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124573,7 +126124,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124588,14 +126139,14 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.start" call. ++// Do executes the "compute.instances.simulateMaintenanceEvent" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124626,10 +126177,10 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", ++ // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "httpMethod": "POST", +- // "id": "compute.instances.start", ++ // "id": "compute.instances.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124637,7 +126188,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -124655,6 +126206,11 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "location": "query", + // "type": "string" + // }, ++ // "withExtendedNotifications": { ++ // "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -124663,7 +126219,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "response": { + // "$ref": "Operation" + // }, +@@ -124675,32 +126231,29 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.instances.startWithEncryptionKey": ++// method id "compute.instances.start": + +-type InstancesStartWithEncryptionKeyCall struct { +- s *Service +- project string +- zone string +- instance string +- instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesStartCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// StartWithEncryptionKey: Starts an instance that was stopped using the +-// instances().stop method. For more information, see Restart an +-// instance. ++// Start: Starts an instance that was stopped using the instances().stop ++// method. For more information, see Restart an instance. + // + // - instance: Name of the instance resource to start. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { +- c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { ++ c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c + } + +@@ -124715,7 +126268,7 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124723,7 +126276,7 @@ func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *Insta + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124731,21 +126284,21 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { ++func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { ++func (c *InstancesStartCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124753,14 +126306,9 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124775,14 +126323,14 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.startWithEncryptionKey" call. ++// Do executes the "compute.instances.start" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124814,9 +126362,9 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + return ret, nil + // { + // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "httpMethod": "POST", +- // "id": "compute.instances.startWithEncryptionKey", ++ // "id": "compute.instances.start", + // "parameterOrder": [ + // "project", + // "zone", +@@ -124850,10 +126398,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", +- // "request": { +- // "$ref": "InstancesStartWithEncryptionKeyRequest" +- // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/start", + // "response": { + // "$ref": "Operation" + // }, +@@ -124865,41 +126410,32 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.instances.stop": ++// method id "compute.instances.startWithEncryptionKey": + +-type InstancesStopCall struct { +- s *Service +- project string +- zone string +- instance string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesStartWithEncryptionKeyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Stop: Stops a running instance, shutting it down cleanly, and allows +-// you to restart the instance at a later time. Stopped instances do not +-// incur VM usage charges while they are stopped. However, resources +-// that the VM is using, such as persistent disks and static IP +-// addresses, will continue to be charged until they are deleted. For +-// more information, see Stopping an instance. ++// StartWithEncryptionKey: Starts an instance that was stopped using the ++// instances().stop method. For more information, see Restart an ++// instance. + // +-// - instance: Name of the instance resource to stop. ++// - instance: Name of the instance resource to start. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { +- c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { ++ c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- return c +-} +- +-// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +-// true, discard the contents of any attached localSSD partitions. +-// Default value is false. +-func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { +- c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) ++ c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c + } + +@@ -124914,7 +126450,7 @@ func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStop + // clients from accidentally creating duplicate commitments. 
The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124922,7 +126458,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124930,21 +126466,21 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { ++func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesStopCall) Header() http.Header { ++func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124952,9 +126488,14 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124969,14 +126510,14 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.stop" call. ++// Do executes the "compute.instances.startWithEncryptionKey" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -125007,23 +126548,18 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + } + return ret, nil + // { +- // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "httpMethod": "POST", +- // "id": "compute.instances.stop", ++ // "id": "compute.instances.startWithEncryptionKey", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { +- // "discardLocalSsd": { +- // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "instance": { +- // "description": "Name of the instance resource to stop.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -125049,7 +126585,10 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", ++ // "request": { ++ // "$ref": "InstancesStartWithEncryptionKeyRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -125061,9 +126600,9 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) + + } + +-// method id "compute.instances.suspend": ++// method id "compute.instances.stop": + +-type InstancesSuspendCall struct { ++type InstancesStopCall struct { + s *Service + project string + zone string +@@ -125073,20 +126612,18 @@ type InstancesSuspendCall struct { + header_ http.Header + } + +-// Suspend: This method suspends a running instance, saving its state to +-// persistent storage, and allows you to resume the instance at a later +-// time. Suspended instances have no compute costs (cores or RAM), and +-// incur only storage charges for the saved VM memory and localSSD data. +-// Any charged resources the virtual machine was using, such as +-// persistent disks and static IP addresses, will continue to be charged +-// while the instance is suspended. For more information, see Suspending +-// and resuming an instance. ++// Stop: Stops a running instance, shutting it down cleanly, and allows ++// you to restart the instance at a later time. Stopped instances do not ++// incur VM usage charges while they are stopped. 
However, resources ++// that the VM is using, such as persistent disks and static IP ++// addresses, will continue to be charged until they are deleted. For ++// more information, see Stopping an instance. + // +-// - instance: Name of the instance resource to suspend. ++// - instance: Name of the instance resource to stop. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { +- c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { ++ c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +@@ -125096,7 +126633,7 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) + // DiscardLocalSsd sets the optional parameter "discardLocalSsd": If + // true, discard the contents of any attached localSSD partitions. + // Default value is false. +-func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { ++func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c + } +@@ -125112,7 +126649,7 @@ func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesS + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { ++func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -125120,7 +126657,7 @@ func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { ++func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -125128,21 +126665,21 @@ func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCal + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { ++func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSuspendCall) Header() http.Header { ++func (c *InstancesStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -125152,7 +126689,7 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/stop") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -125167,14 +126704,14 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.suspend" call. ++// Do executes the "compute.instances.stop" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -125205,10 +126742,10 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err + } + return ret, nil + // { +- // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", ++ // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/stop", + // "httpMethod": "POST", +- // "id": "compute.instances.suspend", ++ // "id": "compute.instances.stop", + // "parameterOrder": [ + // "project", + // "zone", +@@ -125221,7 +126758,205 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err + // "type": "boolean" + // }, + // "instance": { +- // "description": "Name of the instance resource to suspend.", ++ // "description": "Name of the instance resource to stop.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/stop", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.suspend": ++ ++type InstancesSuspendCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Suspend: This method suspends a running instance, saving its state to ++// persistent storage, and allows you to resume the instance at a later ++// time. Suspended instances have no compute costs (cores or RAM), and ++// incur only storage charges for the saved VM memory and localSSD data. ++// Any charged resources the virtual machine was using, such as ++// persistent disks and static IP addresses, will continue to be charged ++// while the instance is suspended. For more information, see Suspending ++// and resuming an instance. ++// ++// - instance: Name of the instance resource to suspend. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { ++ c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ return c ++} ++ ++// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If ++// true, discard the contents of any attached localSSD partitions. ++// Default value is false. ++func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { ++ c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesSuspendCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/suspend") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.suspend" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances have no compute costs (cores or RAM), and incur only storage charges for the saved VM memory and localSSD data. Any charged resources the virtual machine was using, such as persistent disks and static IP addresses, will continue to be charged while the instance is suspended. For more information, see Suspending and resuming an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/suspend", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.suspend", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "discardLocalSsd": { ++ // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "instance": { ++ // "description": "Name of the instance resource to suspend.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -139125,38 +140860,56 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA + } + } + +-// method id "compute.networkAttachments.setIamPolicy": ++// method id "compute.networkAttachments.patch": + +-type NetworkAttachmentsSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsPatchCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ networkattachment *NetworkAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. ++// Patch: Patches the specified NetworkAttachment resource with the data ++// included in the request. This method supports PATCH semantics and ++// uses JSON merge patch format and processing rules. + // ++// - networkAttachment: Name of the NetworkAttachment resource to patch. + // - project: Project ID for this request. +-// - region: The name of the region for this request. 
+-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { +- c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. ++func (r *NetworkAttachmentsService) Patch(project string, region string, networkAttachment string, networkattachment *NetworkAttachment) *NetworkAttachmentsPatchCall { ++ c := &NetworkAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest ++ c.networkAttachment = networkAttachment ++ c.networkattachment = networkattachment ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsPatchCall) RequestId(requestId string) *NetworkAttachmentsPatchCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { ++func (c *NetworkAttachmentsPatchCall) Fields(s ...googleapi.Field) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -139164,21 +140917,21 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *Netwo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { ++func (c *NetworkAttachmentsPatchCall) Context(ctx context.Context) *NetworkAttachmentsPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { ++func (c *NetworkAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -139186,36 +140939,36 @@ func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Respon + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.networkAttachments.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -139234,7 +140987,7 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -139246,16 +140999,23 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.setIamPolicy", ++ // "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.networkAttachments.patch", + // "parameterOrder": [ + // "project", + // "region", +- // "resource" ++ // "networkAttachment" + // ], + // "parameters": { ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to patch.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -139264,26 +141024,24 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "request": { +- // "$ref": "RegionSetPolicyRequest" ++ // "$ref": "NetworkAttachment" + // }, + // "response": { +- // "$ref": "Policy" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -139293,38 +141051,38 @@ func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkAttachments.testIamPermissions": ++// method id "compute.networkAttachments.setIamPolicy": + +-type NetworkAttachmentsTestIamPermissionsCall struct { ++type NetworkAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string +- testpermissionsrequest *TestPermissionsRequest ++ regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { +- c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { ++ c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.regionsetpolicyrequest = regionsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -139332,21 +141090,21 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { ++func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -139354,14 +141112,14 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http. + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -139376,14 +141134,14 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http. + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.networkAttachments.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -139402,7 +141160,7 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -139414,10 +141172,178 @@ func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.networkAttachments.testIamPermissions", ++ // "id": "compute.networkAttachments.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkAttachments.testIamPermissions": ++ ++type NetworkAttachmentsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { ++ c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkAttachments.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", +@@ -167075,83 +169001,86 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( + } + } + +-// method id "compute.regionCommitments.get": ++// method id "compute.regionCommitments.calculateCancellationFee": + +-type RegionCommitmentsGetCall struct { +- s *Service +- project string +- region string +- commitment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionCommitmentsCalculateCancellationFeeCall struct { ++ s *Service ++ project string ++ region string ++ commitment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified commitment resource. ++// CalculateCancellationFee: Calculate cancellation fee for the ++// specified commitment. + // +-// - commitment: Name of the commitment to return. ++// - commitment: Name of the commitment to delete. + // - project: Project ID for this request. + // - region: Name of the region for this request. +-func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { +- c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionCommitmentsService) CalculateCancellationFee(project string, region string, commitment string) *RegionCommitmentsCalculateCancellationFeeCall { ++ c := &RegionCommitmentsCalculateCancellationFeeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.commitment = commitment + return c + } + ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *RegionCommitmentsCalculateCancellationFeeCall) RequestId(requestId string) *RegionCommitmentsCalculateCancellationFeeCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Fields(s ...googleapi.Field) *RegionCommitmentsCalculateCancellationFeeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Context(ctx context.Context) *RegionCommitmentsCalculateCancellationFeeCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionCommitmentsGetCall) Header() http.Header { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -167164,14 +169093,14 @@ func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionCommitments.get" call. +-// Exactly one of *Commitment or error will be non-nil. Any non-2xx ++// Do executes the "compute.regionCommitments.calculateCancellationFee" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. 
Response headers are in either +-// *Commitment.ServerResponse.Header or (if a response was returned at ++// *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { ++func (c *RegionCommitmentsCalculateCancellationFeeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -167190,7 +169119,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Commitment{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -167202,10 +169131,10 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + } + return ret, nil + // { +- // "description": "Returns the specified commitment resource.", +- // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", +- // "httpMethod": "GET", +- // "id": "compute.regionCommitments.get", ++ // "description": "Calculate cancellation fee for the specified commitment.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", ++ // "httpMethod": "POST", ++ // "id": "compute.regionCommitments.calculateCancellationFee", + // "parameterOrder": [ + // "project", + // "region", +@@ -167213,7 +169142,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + // ], + // "parameters": { + // "commitment": { +- // "description": "Name of the commitment to return.", ++ // "description": "Name of the commitment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -167232,40 +169161,44 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}/calculateCancellationFee", + // "response": { +- // "$ref": "Commitment" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.regionCommitments.insert": ++// method id "compute.regionCommitments.cancel": + +-type RegionCommitmentsInsertCall struct { ++type RegionCommitmentsCancelCall struct { + s *Service + project string + region string +- commitment *Commitment ++ commitment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Insert: Creates a commitment in the specified project using the data +-// included in the request. ++// Cancel: Cancel the specified commitment. + // ++// - commitment: Name of the commitment to delete. + // - project: Project ID for this request. + // - region: Name of the region for this request. +-func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { +- c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionCommitmentsService) Cancel(project string, region string, commitment string) *RegionCommitmentsCancelCall { ++ c := &RegionCommitmentsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.commitment = commitment +@@ -167283,7 +169216,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) RequestId(requestId string) *RegionCommitmentsCancelCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -167291,7 +169224,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) Fields(s ...googleapi.Field) *RegionCommitmentsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -167299,21 +169232,21 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { ++func (c *RegionCommitmentsCancelCall) Context(ctx context.Context) *RegionCommitmentsCancelCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionCommitmentsInsertCall) Header() http.Header { ++func (c *RegionCommitmentsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionCommitmentsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -167321,14 +169254,9 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}/cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -167336,20 +169264,375 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "commitment": c.commitment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionCommitments.insert" call. ++// Do executes the "compute.regionCommitments.cancel" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionCommitmentsCancelCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Cancel the specified commitment.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ // "httpMethod": "POST", ++ // "id": "compute.regionCommitments.cancel", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "commitment" ++ // ], ++ // "parameters": { ++ // "commitment": { ++ // "description": "Name of the commitment to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}/cancel", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionCommitments.get": ++ ++type RegionCommitmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ commitment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the specified commitment resource. ++// ++// - commitment: Name of the commitment to return. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. 
++func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { ++ c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.commitment = commitment ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionCommitmentsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments/{commitment}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "commitment": c.commitment, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionCommitments.get" call. ++// Exactly one of *Commitment or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Commitment.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Commitment{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the specified commitment resource.", ++ // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "httpMethod": "GET", ++ // "id": "compute.regionCommitments.get", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "commitment" ++ // ], ++ // "parameters": { ++ // "commitment": { ++ // "description": "Name of the commitment to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/commitments/{commitment}", ++ // "response": { ++ // "$ref": "Commitment" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionCommitments.insert": ++ ++type RegionCommitmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ commitment *Commitment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a commitment in the specified project using the data ++// included in the request. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { ++ c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.commitment = commitment ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. 
The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionCommitmentsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/commitments") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionCommitments.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -178821,6 +181104,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use regionInstanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -188099,6 +190383,195 @@ func (c *RegionNetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption + + } + ++// method id "compute.regionNetworkFirewallPolicies.patchAssociation": ++ ++type RegionNetworkFirewallPoliciesPatchAssociationCall struct { ++ s *Service ++ project string ++ region string ++ firewallPolicy string ++ firewallpolicyassociation *FirewallPolicyAssociation ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchAssociation: Updates an association for the specified network ++// firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionNetworkFirewallPoliciesService) PatchAssociation(project string, region string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c := &RegionNetworkFirewallPoliciesPatchAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyassociation = firewallpolicyassociation ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) RequestId(requestId string) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Fields(s ...googleapi.Field) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Context(ctx context.Context) *RegionNetworkFirewallPoliciesPatchAssociationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionNetworkFirewallPolicies.patchAssociation" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionNetworkFirewallPoliciesPatchAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates an association for the specified network firewall policy.", ++ // "flatPath": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ // "httpMethod": "POST", ++ // "id": "compute.regionNetworkFirewallPolicies.patchAssociation", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/firewallPolicies/{firewallPolicy}/patchAssociation", ++ // "request": { ++ // "$ref": "FirewallPolicyAssociation" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionNetworkFirewallPolicies.patchRule": + + type RegionNetworkFirewallPoliciesPatchRuleCall struct { +@@ -205701,6 +208174,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { + + } + ++// method id "compute.routers.getNatIpInfo": ++ ++type RoutersGetNatIpInfoCall struct { ++ s *Service ++ project string ++ region string ++ router string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetNatIpInfo: Retrieves runtime NAT IP information. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++// - router: Name of the Router resource to query for Nat IP ++// information. The name should conform to RFC1035. ++func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall { ++ c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.router = router ++ return c ++} ++ ++// NatName sets the optional parameter "natName": Name of the nat ++// service to filter the NAT IP information. If it is omitted, all nats ++// for this router will be returned. Name should conform to RFC1035. ++func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall { ++ c.urlParams_.Set("natName", natName) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RoutersGetNatIpInfoCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "router": c.router, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.routers.getNatIpInfo" call. ++// Exactly one of *NatIpInfoResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NatIpInfoResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NatIpInfoResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves runtime NAT IP information.", ++ // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ // "httpMethod": "GET", ++ // "id": "compute.routers.getNatIpInfo", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "router" ++ // ], ++ // "parameters": { ++ // "natName": { ++ // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. 
Name should conform to RFC1035.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "router": { ++ // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", ++ // "response": { ++ // "$ref": "NatIpInfoResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.routers.getNatMappingInfo": + + type RoutersGetNatMappingInfoCall struct { +@@ -212201,24 +214859,6 @@ func (r *ServiceAttachmentsService) Patch(project string, region string, service + return c + } + +-// ReconcileConnections sets the optional parameter +-// "reconcileConnections": This flag determines how to change the status +-// of consumer connections, when the connection policy for the +-// corresponding project or network is modified. If the flag is false, +-// the default case, then existing ACCEPTED and REJECTED consumer +-// connections stay in that state. For example, even if the project is +-// removed from the accept list, existing ACCEPTED connections will stay +-// the same. If the flag is true, then the connection can change from +-// ACCEPTED or REJECTED to pending when the connection policy is +-// modified. For example, if a project is removed from the reject list, +-// its existing REJECTED connections will move to the PENDING state. If +-// the project is also added to the accept list, then those connections +-// will move to the ACCEPTED state. +-func (c *ServiceAttachmentsPatchCall) ReconcileConnections(reconcileConnections bool) *ServiceAttachmentsPatchCall { +- c.urlParams_.Set("reconcileConnections", fmt.Sprint(reconcileConnections)) +- return c +-} +- + // RequestId sets the optional parameter "requestId": An optional + // request ID to identify requests. Specify a unique request ID so that + // if you must retry your request, the server will know to ignore the +@@ -212345,11 +214985,6 @@ func (c *ServiceAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operati + // "required": true, + // "type": "string" + // }, +- // "reconcileConnections": { +- // "description": "This flag determines how to change the status of consumer connections, when the connection policy for the corresponding project or network is modified. If the flag is false, the default case, then existing ACCEPTED and REJECTED consumer connections stay in that state. For example, even if the project is removed from the accept list, existing ACCEPTED connections will stay the same. If the flag is true, then the connection can change from ACCEPTED or REJECTED to pending when the connection policy is modified. 
For example, if a project is removed from the reject list, its existing REJECTED connections will move to the PENDING state. If the project is also added to the accept list, then those connections will move to the ACCEPTED state.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "region": { + // "description": "The region scoping this request and should conform to RFC1035.", + // "location": "path", +@@ -217034,6 +219669,2041 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T + + } + ++// method id "compute.storagePools.aggregatedList": ++ ++type StoragePoolsAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AggregatedList: Retrieves an aggregated list of storage pools. ++// ++// - project: Project ID for this request. ++func (r *StoragePoolsService) AggregatedList(project string) *StoragePoolsAggregatedListCall { ++ c := &StoragePoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. 
For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *StoragePoolsAggregatedListCall) Filter(filter string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *StoragePoolsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *StoragePoolsAggregatedListCall) MaxResults(maxResults int64) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *StoragePoolsAggregatedListCall) OrderBy(orderBy string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *StoragePoolsAggregatedListCall) PageToken(pageToken string) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *StoragePoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *StoragePoolsAggregatedListCall) Fields(s ...googleapi.Field) *StoragePoolsAggregatedListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsAggregatedListCall) IfNoneMatch(entityTag string) *StoragePoolsAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsAggregatedListCall) Context(ctx context.Context) *StoragePoolsAggregatedListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsAggregatedListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.aggregatedList" call. ++// Exactly one of *StoragePoolAggregatedList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *StoragePoolAggregatedList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*StoragePoolAggregatedList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePoolAggregatedList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves an aggregated list of storage pools.", ++ // "flatPath": "projects/{project}/aggregated/storagePools", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.aggregatedList", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. 
For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/aggregated/storagePools", ++ // "response": { ++ // "$ref": "StoragePoolAggregatedList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *StoragePoolsAggregatedListCall) Pages(ctx context.Context, f func(*StoragePoolAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.storagePools.delete": ++ ++type StoragePoolsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Delete: Deletes the specified storage pool. 
Deleting a storagePool ++// removes its data permanently and is irreversible. However, deleting a ++// storagePool does not delete any snapshots previously made from the ++// storagePool. You must separately delete snapshots. ++// ++// - project: Project ID for this request. ++// - storagePool: Name of the storage pool to delete. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Delete(project string, zone string, storagePool string) *StoragePoolsDeleteCall { ++ c := &StoragePoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsDeleteCall) RequestId(requestId string) *StoragePoolsDeleteCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsDeleteCall) Fields(s ...googleapi.Field) *StoragePoolsDeleteCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsDeleteCall) Context(ctx context.Context) *StoragePoolsDeleteCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsDeleteCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("DELETE", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes the specified storage pool. Deleting a storagePool removes its data permanently and is irreversible. However, deleting a storagePool does not delete any snapshots previously made from the storagePool. You must separately delete snapshots.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.storagePools.delete", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "Name of the storage pool to delete.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.get": ++ ++type StoragePoolsGetCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns a specified storage pool. Gets a list of available ++// storage pools by making a list() request. ++// ++// - project: Project ID for this request. ++// - storagePool: Name of the storage pool to return. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Get(project string, zone string, storagePool string) *StoragePoolsGetCall { ++ c := &StoragePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsGetCall) Fields(s ...googleapi.Field) *StoragePoolsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsGetCall) IfNoneMatch(entityTag string) *StoragePoolsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsGetCall) Context(ctx context.Context) *StoragePoolsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *StoragePoolsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.get" call. ++// Exactly one of *StoragePool or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *StoragePool.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsGetCall) Do(opts ...googleapi.CallOption) (*StoragePool, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePool{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns a specified storage pool. 
Gets a list of available storage pools by making a list() request.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.get", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "Name of the storage pool to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "response": { ++ // "$ref": "StoragePool" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.getIamPolicy": ++ ++type StoragePoolsGetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) GetIamPolicy(project string, zone string, resource string) *StoragePoolsGetIamPolicyCall { ++ c := &StoragePoolsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *StoragePoolsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *StoragePoolsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsGetIamPolicyCall) Fields(s ...googleapi.Field) *StoragePoolsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. 
++func (c *StoragePoolsGetIamPolicyCall) IfNoneMatch(entityTag string) *StoragePoolsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsGetIamPolicyCall) Context(ctx context.Context) *StoragePoolsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *StoragePoolsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.insert": ++ ++type StoragePoolsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ storagepool *StoragePool ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a storage pool in the specified project using the ++// data in the request. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Insert(project string, zone string, storagepool *StoragePool) *StoragePoolsInsertCall { ++ c := &StoragePoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagepool = storagepool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsInsertCall) RequestId(requestId string) *StoragePoolsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *StoragePoolsInsertCall) Fields(s ...googleapi.Field) *StoragePoolsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsInsertCall) Context(ctx context.Context) *StoragePoolsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.storagepool) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Creates a storage pool in the specified project using the data in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.insert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools", ++ // "request": { ++ // "$ref": "StoragePool" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.list": ++ ++type StoragePoolsListCall struct { ++ s *Service ++ project string ++ zone string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves a list of storage pools contained within the ++// specified zone. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) List(project string, zone string) *StoragePoolsListCall { ++ c := &StoragePoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. 
Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *StoragePoolsListCall) Filter(filter string) *StoragePoolsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *StoragePoolsListCall) MaxResults(maxResults int64) *StoragePoolsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). 
Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *StoragePoolsListCall) OrderBy(orderBy string) *StoragePoolsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *StoragePoolsListCall) PageToken(pageToken string) *StoragePoolsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *StoragePoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *StoragePoolsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsListCall) Fields(s ...googleapi.Field) *StoragePoolsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *StoragePoolsListCall) IfNoneMatch(entityTag string) *StoragePoolsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsListCall) Context(ctx context.Context) *StoragePoolsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.list" call. ++// Exactly one of *StoragePoolList or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *StoragePoolList.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsListCall) Do(opts ...googleapi.CallOption) (*StoragePoolList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &StoragePoolList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves a list of storage pools contained within the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools", ++ // "httpMethod": "GET", ++ // "id": "compute.storagePools.list", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools", ++ // "response": { ++ // "$ref": "StoragePoolList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *StoragePoolsListCall) Pages(ctx context.Context, f func(*StoragePoolList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.storagePools.setIamPolicy": ++ ++type StoragePoolsSetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetpolicyrequest *ZoneSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *StoragePoolsSetIamPolicyCall { ++ c := &StoragePoolsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.zonesetpolicyrequest = zonesetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsSetIamPolicyCall) Fields(s ...googleapi.Field) *StoragePoolsSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsSetIamPolicyCall) Context(ctx context.Context) *StoragePoolsSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. 
Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *StoragePoolsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "ZoneSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.setLabels": ++ ++type StoragePoolsSetLabelsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetlabelsrequest *ZoneSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetLabels: Sets the labels on a storage pools. To learn more about ++// labels, read the Labeling Resources documentation. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. 
++func (r *StoragePoolsService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *StoragePoolsSetLabelsCall { ++ c := &StoragePoolsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.zonesetlabelsrequest = zonesetlabelsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsSetLabelsCall) RequestId(requestId string) *StoragePoolsSetLabelsCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsSetLabelsCall) Fields(s ...googleapi.Field) *StoragePoolsSetLabelsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsSetLabelsCall) Context(ctx context.Context) *StoragePoolsSetLabelsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsSetLabelsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.setLabels" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. 
Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *StoragePoolsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the labels on a storage pools. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.setLabels", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/setLabels", ++ // "request": { ++ // "$ref": "ZoneSetLabelsRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.testIamPermissions": ++ ++type StoragePoolsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *StoragePoolsTestIamPermissionsCall { ++ c := &StoragePoolsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsTestIamPermissionsCall) Fields(s ...googleapi.Field) *StoragePoolsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsTestIamPermissionsCall) Context(ctx context.Context) *StoragePoolsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *StoragePoolsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *StoragePoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.storagePools.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.storagePools.update": ++ ++type StoragePoolsUpdateCall struct { ++ s *Service ++ project string ++ zone string ++ storagePool string ++ storagepool *StoragePool ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Update: Updates the specified storagePool with the data included in ++// the request. The update is performed only on selected fields included ++// as part of update-mask. Only the following fields can be modified: ++// size_tb and provisioned_iops. ++// ++// - project: Project ID for this request. ++// - storagePool: The storagePool name for this request. ++// - zone: The name of the zone for this request. ++func (r *StoragePoolsService) Update(project string, zone string, storagePool string, storagepool *StoragePool) *StoragePoolsUpdateCall { ++ c := &StoragePoolsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.storagePool = storagePool ++ c.storagepool = storagepool ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. 
Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *StoragePoolsUpdateCall) RequestId(requestId string) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// UpdateMask sets the optional parameter "updateMask": update_mask ++// indicates fields to be updated as part of this request. ++func (c *StoragePoolsUpdateCall) UpdateMask(updateMask string) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("updateMask", updateMask) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *StoragePoolsUpdateCall) Fields(s ...googleapi.Field) *StoragePoolsUpdateCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *StoragePoolsUpdateCall) Context(ctx context.Context) *StoragePoolsUpdateCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *StoragePoolsUpdateCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *StoragePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.storagepool) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/storagePools/{storagePool}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "storagePool": c.storagePool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.storagePools.update" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *StoragePoolsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates the specified storagePool with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: size_tb and provisioned_iops.", ++ // "flatPath": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.storagePools.update", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "storagePool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "storagePool": { ++ // "description": "The storagePool name for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "updateMask": { ++ // "description": "update_mask indicates fields to be updated as part of this request.", ++ // "format": "google-fieldmask", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/storagePools/{storagePool}", ++ // "request": { ++ // "$ref": "StoragePool" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.subnetworks.aggregatedList": + + type SubnetworksAggregatedListCall struct { +@@ -225652,6 +230322,196 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta + } + } + ++// method id "compute.targetInstances.setSecurityPolicy": ++ ++type TargetInstancesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ targetInstance string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target instance. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - targetInstance: Name of the TargetInstance resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - zone: Name of the zone scoping this request. ++func (r *TargetInstancesService) SetSecurityPolicy(project string, zone string, targetInstance string, securitypolicyreference *SecurityPolicyReference) *TargetInstancesSetSecurityPolicyCall { ++ c := &TargetInstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.targetInstance = targetInstance ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *TargetInstancesSetSecurityPolicyCall) RequestId(requestId string) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetInstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *TargetInstancesSetSecurityPolicyCall) Context(ctx context.Context) *TargetInstancesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetInstancesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetInstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "targetInstance": c.targetInstance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetInstances.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetInstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetInstances.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "targetInstance" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetInstance": { ++ // "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "Name of the zone scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetInstances.testIamPermissions": + + type TargetInstancesTestIamPermissionsCall struct { +@@ -228062,6 +232922,196 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.targetPools.setSecurityPolicy": ++ ++type TargetPoolsSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ targetPool string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target pool. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - targetPool: Name of the TargetPool resource to which the security ++// policy should be set. The name should conform to RFC1035. ++func (r *TargetPoolsService) SetSecurityPolicy(project string, region string, targetPool string, securitypolicyreference *SecurityPolicyReference) *TargetPoolsSetSecurityPolicyCall { ++ c := &TargetPoolsSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.targetPool = targetPool ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetPoolsSetSecurityPolicyCall) RequestId(requestId string) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetPoolsSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *TargetPoolsSetSecurityPolicyCall) Context(ctx context.Context) *TargetPoolsSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetPoolsSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetPoolsSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "targetPool": c.targetPool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetPools.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetPoolsSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetPools.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "targetPool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetPool": { ++ // "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetPools.testIamPermissions": + + type TargetPoolsTestIamPermissionsCall struct { +diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +index 63a7da87c21..9ed51146330 100644 +--- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +@@ -550,6 +550,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource.", ++ "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.addresses.move", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": 
"string" ++ }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "request": { ++ "$ref": "RegionAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", +@@ -2457,6 +2507,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "bulkInsert": { ++ "description": "Bulk create a set of disks.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ "httpMethod": "POST", ++ "id": "compute.disks.bulkInsert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ "request": { ++ "$ref": "BulkInsertDiskResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "createSnapshot": { + "description": "Creates a snapshot of a specified persistent disk. 
For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", +@@ -2955,6 +3047,145 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "startAsyncReplication": { ++ "description": "Starts asynchronous replication. Must be invoked on the primary disk.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.startAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "disk" ++ ], ++ "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", ++ "request": { ++ "$ref": "DisksStartAsyncReplicationRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "stopAsyncReplication": { ++ "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.stopAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "disk" ++ ], ++ "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "stopGroupAsyncReplication": { ++ "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.disks.stopGroupAsyncReplication", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request. 
This must be the zone of the primary or secondary disks in the consistency group.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ "request": { ++ "$ref": "DisksStopGroupAsyncReplicationResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", +@@ -4827,6 +5058,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource from one project to another project.", ++ "flatPath": "projects/{project}/global/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.globalAddresses.move", ++ "parameterOrder": [ ++ "project", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/addresses/{address}/move", ++ "request": { ++ "$ref": "GlobalAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", +@@ -8437,6 +8710,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use instanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -11274,11 +11548,11 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "setServiceAccount": { +- "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", +- "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + "httpMethod": "POST", +- "id": "compute.instances.setServiceAccount", ++ "id": "compute.instances.setSecurityPolicy", + "parameterOrder": [ + "project", + "zone", +@@ -11286,9 +11560,8 @@ + ], + "parameters": { + "instance": { +- "description": "Name of the instance resource to start.", ++ "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -11305,16 +11578,16 @@ + "type": "string" + }, + "zone": { +- "description": "The name of the zone for this request.", ++ "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + "request": { +- "$ref": "InstancesSetServiceAccountRequest" ++ "$ref": "InstancesSetSecurityPolicyRequest" + }, + "response": { + "$ref": "Operation" +@@ -11324,11 +11597,11 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "setShieldedInstanceIntegrityPolicy": { +- "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- "httpMethod": "PATCH", +- "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ "setServiceAccount": { ++ "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "httpMethod": "POST", ++ "id": "compute.instances.setServiceAccount", + "parameterOrder": [ + "project", + "zone", +@@ -11336,7 +11609,57 @@ + ], + "parameters": { + "instance": { +- "description": "Name or id of the instance scoping this request.", ++ "description": "Name of the instance resource to start.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ "request": { ++ "$ref": "InstancesSetServiceAccountRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setShieldedInstanceIntegrityPolicy": { ++ "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ "httpMethod": "PATCH", ++ "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instance" ++ ], ++ "parameters": { ++ "instance": { ++ "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -11499,6 +11822,11 @@ + "required": true, + "type": "string" + }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", +@@ -12113,13 +12441,13 @@ + } + } + }, +- "interconnectAttachments": { ++ "instantSnapshots": { + "methods": { + "aggregatedList": { +- "description": "Retrieves an aggregated list of interconnect attachments.", +- "flatPath": "projects/{project}/aggregated/interconnectAttachments", ++ "description": "Retrieves an aggregated list of instantSnapshots.", ++ "flatPath": "projects/{project}/aggregated/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.aggregatedList", ++ "id": "compute.instantSnapshots.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -12165,9 +12493,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/aggregated/interconnectAttachments", ++ "path": "projects/{project}/aggregated/instantSnapshots", + "response": { +- "$ref": "InterconnectAttachmentAggregatedList" ++ "$ref": "InstantSnapshotAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12176,18 +12504,18 @@ + ] + }, + "delete": { +- "description": "Deletes the specified interconnect attachment.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "httpMethod": "DELETE", +- "id": "compute.interconnectAttachments.delete", ++ "id": "compute.instantSnapshots.delete", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "instantSnapshot" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to delete.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12200,20 +12528,70 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "export": { ++ "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.export", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "instantSnapshot" ++ ], ++ "parameters": { ++ "instantSnapshot": { ++ "description": "Name of the instant snapshot to export.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ "request": { ++ "$ref": "InstantSnapshotsExportRequest" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -12223,18 +12601,18 @@ + ] + }, + "get": { +- "description": "Returns the specified interconnect attachment.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "description": "Returns the specified InstantSnapshot resource in the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.get", ++ "id": "compute.instantSnapshots.get", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "instantSnapshot" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to return.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12247,17 +12625,17 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + "response": { +- "$ref": "InterconnectAttachment" ++ "$ref": "InstantSnapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12265,16 +12643,23 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", +- "httpMethod": "POST", +- "id": "compute.interconnectAttachments.insert", ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.instantSnapshots.getIamPolicy", + "parameterOrder": [ + "project", +- "region" ++ "zone", ++ "resource" + ], + "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12282,27 +12667,64 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates an instant snapshot in the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.insert", ++ "parameterOrder": [ ++ "project", ++ "zone" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, +- "validateOnly": { +- "description": "If true, the request will not be committed.", +- "location": "query", +- "type": "boolean" ++ "zone": { ++ "description": "Name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots", + "request": { +- "$ref": "InterconnectAttachment" ++ "$ref": "InstantSnapshot" + }, + "response": { + "$ref": "Operation" +@@ -12313,13 +12735,13 @@ + ] + }, + "list": { +- "description": "Retrieves the list of interconnect attachments contained within the specified region.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ "description": "Retrieves the list of InstantSnapshot resources contained within the specified zone.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.interconnectAttachments.list", ++ "id": "compute.instantSnapshots.list", + "parameterOrder": [ + "project", +- "region" ++ "zone" + ], + "parameters": { + "filter": { +@@ -12352,22 +12774,22 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots", + "response": { +- "$ref": "InterconnectAttachmentList" ++ "$ref": "InstantSnapshotList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12375,24 +12797,17 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "patch": { +- "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", +- "httpMethod": "PATCH", +- "id": "compute.interconnectAttachments.patch", ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.instantSnapshots.setIamPolicy", + "parameterOrder": [ + "project", +- "region", +- "interconnectAttachment" ++ "zone", ++ "resource" + ], + "parameters": { +- "interconnectAttachment": { +- "description": "Name of the interconnect attachment to patch.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12400,25 +12815,27 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- "location": "query", ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", + "request": { +- "$ref": "InterconnectAttachment" ++ "$ref": "ZoneSetPolicyRequest" + }, + "response": { +- "$ref": "Operation" ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12426,13 +12843,13 @@ + ] + }, + "setLabels": { +- "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ "description": "Sets the labels on a instantSnapshot in the given zone. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.interconnectAttachments.setLabels", ++ "id": "compute.instantSnapshots.setLabels", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +@@ -12443,13 +12860,6 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The region for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- "required": true, +- "type": "string" +- }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +@@ -12461,11 +12871,18 @@ + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + "request": { +- "$ref": "RegionSetLabelsRequest" ++ "$ref": "ZoneSetLabelsRequest" + }, + "response": { + "$ref": "Operation" +@@ -12477,12 +12894,12 @@ + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.interconnectAttachments.testIamPermissions", ++ "id": "compute.instantSnapshots.testIamPermissions", + "parameterOrder": [ + "project", +- "region", ++ "zone", + "resource" + ], + "parameters": { +@@ -12493,22 +12910,22 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "The name of the region for this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -12523,48 +12940,13 @@ + } + } + }, +- "interconnectLocations": { ++ "interconnectAttachments": { + "methods": { +- "get": { +- "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", +- "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- "httpMethod": "GET", +- "id": "compute.interconnectLocations.get", +- "parameterOrder": [ +- "project", +- "interconnectLocation" +- ], +- "parameters": { +- "interconnectLocation": { +- "description": "Name of the interconnect location to return.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" +- }, +- "project": { +- "description": "Project ID for this request.", +- "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- "required": true, +- "type": "string" +- } +- }, +- "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- "response": { +- "$ref": "InterconnectLocation" +- }, +- "scopes": [ +- "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" +- ] +- }, +- "list": { +- "description": "Retrieves the list of interconnect locations available to the specified project.", +- "flatPath": "projects/{project}/global/interconnectLocations", ++ "aggregatedList": { ++ "description": "Retrieves an aggregated list of interconnect attachments.", ++ "flatPath": "projects/{project}/aggregated/interconnectAttachments", + "httpMethod": "GET", +- "id": "compute.interconnectLocations.list", ++ "id": "compute.interconnectAttachments.aggregatedList", + "parameterOrder": [ + "project" + ], +@@ -12574,6 +12956,11 @@ + "location": "query", + "type": "string" + }, ++ "includeAllScopes": { ++ "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ "location": "query", ++ "type": "boolean" ++ }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", +@@ -12605,32 +12992,29 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnectLocations", ++ "path": "projects/{project}/aggregated/interconnectAttachments", + "response": { +- "$ref": "InterconnectLocationList" ++ "$ref": "InterconnectAttachmentAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] +- } +- } +- }, +- "interconnects": { +- "methods": { ++ }, + "delete": { +- "description": "Deletes the specified Interconnect.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Deletes the specified interconnect attachment.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "DELETE", +- "id": "compute.interconnects.delete", ++ "id": "compute.interconnectAttachments.delete", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to delete.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12643,13 +13027,20 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { + "$ref": "Operation" + }, +@@ -12659,17 +13050,18 @@ + ] + }, + "get": { +- "description": "Returns the specified Interconnect. 
Get a list of available Interconnects by making a list() request.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Returns the specified interconnect attachment.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "GET", +- "id": "compute.interconnects.get", ++ "id": "compute.interconnectAttachments.get", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to return.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12681,46 +13073,18 @@ + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" +- } +- }, +- "path": "projects/{project}/global/interconnects/{interconnect}", +- "response": { +- "$ref": "Interconnect" +- }, +- "scopes": [ +- "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" +- ] +- }, +- "getDiagnostics": { +- "description": "Returns the interconnectDiagnostics for the specified Interconnect.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", +- "httpMethod": "GET", +- "id": "compute.interconnects.getDiagnostics", +- "parameterOrder": [ +- "project", +- "interconnect" +- ], +- "parameters": { +- "interconnect": { +- "description": "Name of the interconnect resource to query.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, +- "type": "string" + }, +- "project": { +- "description": "Project ID for this request.", ++ "region": { ++ "description": "Name of the region for this request.", + "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { +- "$ref": "InterconnectsGetDiagnosticsResponse" ++ "$ref": "InterconnectAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12729,12 +13093,13 @@ + ] + }, + "insert": { +- "description": "Creates an Interconnect in the specified project using the data included in the request.", +- "flatPath": "projects/{project}/global/interconnects", ++ "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + "httpMethod": "POST", +- "id": "compute.interconnects.insert", ++ "id": "compute.interconnectAttachments.insert", + "parameterOrder": [ +- "project" ++ "project", ++ "region" + ], + "parameters": { + "project": { +@@ -12744,15 +13109,27 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": 
"string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnects", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments", + "request": { +- "$ref": "Interconnect" ++ "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" +@@ -12763,12 +13140,13 @@ + ] + }, + "list": { +- "description": "Retrieves the list of Interconnects available to the specified project.", +- "flatPath": "projects/{project}/global/interconnects", ++ "description": "Retrieves the list of interconnect attachments contained within the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + "httpMethod": "GET", +- "id": "compute.interconnects.list", ++ "id": "compute.interconnectAttachments.list", + "parameterOrder": [ +- "project" ++ "project", ++ "region" + ], + "parameters": { + "filter": { +@@ -12801,15 +13179,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, +- "path": "projects/{project}/global/interconnects", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments", + "response": { +- "$ref": "InterconnectList" ++ "$ref": "InterconnectAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12818,17 +13203,18 @@ + ] + }, + "patch": { +- "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "description": "Updates the specified interconnect attachment with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "PATCH", +- "id": "compute.interconnects.patch", ++ "id": "compute.interconnectAttachments.patch", + "parameterOrder": [ + "project", +- "interconnect" ++ "region", ++ "interconnectAttachment" + ], + "parameters": { +- "interconnect": { +- "description": "Name of the interconnect to update.", ++ "interconnectAttachment": { ++ "description": "Name of the interconnect attachment to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -12841,15 +13227,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{interconnect}", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "request": { +- "$ref": "Interconnect" ++ "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" +@@ -12860,12 +13253,13 @@ + ] + }, + "setLabels": { +- "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", +- "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", ++ "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.interconnects.setLabels", ++ "id": "compute.interconnectAttachments.setLabels", + "parameterOrder": [ + "project", ++ "region", + "resource" + ], + "parameters": { +@@ -12876,17 +13270,29 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "The region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{resource}/setLabels", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "request": { +- "$ref": "GlobalSetLabelsRequest" ++ "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" +@@ -12898,11 +13304,12 @@ + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.interconnects.testIamPermissions", ++ "id": "compute.interconnectAttachments.testIamPermissions", + "parameterOrder": [ + "project", ++ "region", + "resource" + ], + "parameters": { +@@ -12913,15 +13320,22 @@ + "required": true, + "type": "string" + }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -12936,22 +13350,22 @@ + } + } + }, +- "licenseCodes": { ++ "interconnectLocations": { + "methods": { + "get": { +- "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + "httpMethod": "GET", +- "id": "compute.licenseCodes.get", ++ "id": "compute.interconnectLocations.get", + "parameterOrder": [ + "project", +- "licenseCode" ++ "interconnectLocation" + ], + "parameters": { +- "licenseCode": { +- "description": "Number corresponding to the License code resource to return.", ++ "interconnectLocation": { ++ "description": "Name of the interconnect location to return.", + "location": "path", +- "pattern": "[0-9]{0,61}?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -12963,9 +13377,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + "response": { +- "$ref": "LicenseCode" ++ "$ref": "InterconnectLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -12973,16 +13387,38 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- "httpMethod": "POST", +- "id": "compute.licenseCodes.testIamPermissions", ++ "list": { ++ "description": "Retrieves the list of interconnect locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectLocations.list", + "parameterOrder": [ +- "project", +- "resource" ++ "project" + ], + "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -12990,20 +13426,109 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectLocations", ++ "response": { ++ "$ref": "InterconnectLocationList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "interconnectRemoteLocations": { ++ "methods": { ++ "get": { ++ "description": "Returns the details for the specified interconnect remote location. 
Gets a list of available interconnect remote locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.get", ++ "parameterOrder": [ ++ "project", ++ "interconnectRemoteLocation" ++ ], ++ "parameters": { ++ "interconnectRemoteLocation": { ++ "description": "Name of the interconnect remote location to return.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- "request": { +- "$ref": "TestPermissionsRequest" ++ "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "response": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } + }, ++ "path": "projects/{project}/global/interconnectRemoteLocations", + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "InterconnectRemoteLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13013,20 +13538,20 @@ + } + } + }, +- "licenses": { ++ "interconnects": { + "methods": { + "delete": { +- "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses/{license}", ++ "description": "Deletes the specified Interconnect.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", +- "id": "compute.licenses.delete", ++ "id": "compute.interconnects.delete", + "parameterOrder": [ + "project", +- "license" ++ "interconnect" + ], + "parameters": { +- "license": { +- "description": "Name of the license resource to delete.", ++ "interconnect": { ++ "description": "Name of the interconnect to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -13045,7 +13570,7 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{license}", ++ "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { + "$ref": "Operation" + }, +@@ -13055,17 +13580,17 @@ + ] + }, + "get": { +- "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{license}", ++ "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", +- "id": "compute.licenses.get", ++ "id": "compute.interconnects.get", + "parameterOrder": [ + "project", +- "license" ++ "interconnect" + ], + "parameters": { +- "license": { +- "description": "Name of the License resource to return.", ++ "interconnect": { ++ "description": "Name of the interconnect to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, +@@ -13079,9 +13604,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{license}", ++ "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { +- "$ref": "License" ++ "$ref": "Interconnect" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13089,40 +13614,34 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "getIamPolicy": { +- "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "getDiagnostics": { ++ "description": "Returns the interconnectDiagnostics for the specified Interconnect.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "httpMethod": "GET", +- "id": "compute.licenses.getIamPolicy", ++ "id": "compute.interconnects.getDiagnostics", + "parameterOrder": [ + "project", +- "resource" ++ "interconnect" + ], + "parameters": { +- "optionsRequestedPolicyVersion": { +- "description": "Requested IAM Policy version.", +- "format": "int32", +- "location": "query", +- "type": "integer" +- }, +- "project": { +- "description": "Project ID for this request.", ++ "interconnect": { ++ "description": "Name of the interconnect resource to query.", + "location": "path", +- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "project": { ++ "description": "Project ID for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "response": { +- "$ref": "Policy" ++ "$ref": "InterconnectsGetDiagnosticsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13131,10 +13650,10 @@ + ] + }, + "insert": { +- "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses", ++ "description": "Creates an Interconnect in the specified project using the data included in the request.", ++ "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "POST", +- "id": "compute.licenses.insert", ++ "id": "compute.interconnects.insert", + "parameterOrder": [ + "project" + ], +@@ -13152,26 +13671,23 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses", ++ "path": "projects/{project}/global/interconnects", + "request": { +- "$ref": "License" ++ "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/devstorage.full_control", +- "https://www.googleapis.com/auth/devstorage.read_only", +- "https://www.googleapis.com/auth/devstorage.read_write" ++ "https://www.googleapis.com/auth/compute" + ] + }, + "list": { +- "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- "flatPath": "projects/{project}/global/licenses", ++ "description": "Retrieves the list of Interconnects available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "GET", +- "id": "compute.licenses.list", ++ "id": "compute.interconnects.list", + "parameterOrder": [ + "project" + ], +@@ -13212,9 +13728,9 @@ + "type": "boolean" + } + }, +- "path": "projects/{project}/global/licenses", ++ "path": "projects/{project}/global/interconnects", + "response": { +- "$ref": "LicensesListResponse" ++ "$ref": "InterconnectList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13222,11 +13738,53 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "setIamPolicy": { +- "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "patch": { ++ "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ "httpMethod": "PATCH", ++ "id": "compute.interconnects.patch", ++ "parameterOrder": [ ++ "project", ++ "interconnect" ++ ], ++ "parameters": { ++ "interconnect": { ++ "description": "Name of the interconnect to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/interconnects/{interconnect}", ++ "request": { ++ "$ref": "Interconnect" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setLabels": { ++ "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", +- "id": "compute.licenses.setIamPolicy", ++ "id": "compute.interconnects.setLabels", + "parameterOrder": [ + "project", + "resource" +@@ -13247,12 +13805,12 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "path": "projects/{project}/global/interconnects/{resource}/setLabels", + "request": { +- "$ref": "GlobalSetPolicyRequest" ++ "$ref": "GlobalSetLabelsRequest" + }, + "response": { +- "$ref": "Policy" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -13260,10 +13818,10 @@ + ] + }, + "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.licenses.testIamPermissions", ++ "id": "compute.interconnects.testIamPermissions", + "parameterOrder": [ + "project", + "resource" +@@ -13284,7 +13842,7 @@ + "type": "string" + } + }, +- "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, +@@ -13299,22 +13857,22 @@ + } + } + }, +- "machineImages": { ++ "licenseCodes": { + "methods": { +- "delete": { +- "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", +- "flatPath": "projects/{project}/global/machineImages/{machineImage}", +- "httpMethod": "DELETE", +- "id": "compute.machineImages.delete", ++ "get": { ++ "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "httpMethod": "GET", ++ "id": "compute.licenseCodes.get", + "parameterOrder": [ + "project", +- "machineImage" ++ "licenseCode" + ], + "parameters": { +- "machineImage": { +- "description": "The name of the machine image to delete.", ++ "licenseCode": { ++ "description": "Number corresponding to the License code resource to return.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[0-9]{0,61}?", + "required": true, + "type": "string" + }, +@@ -13324,7 +13882,370 @@ + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" +- }, ++ } ++ }, ++ "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ "response": { ++ "$ref": "LicenseCode" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.licenseCodes.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "licenses": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{license}", ++ "httpMethod": "DELETE", ++ "id": "compute.licenses.delete", ++ "parameterOrder": [ ++ "project", ++ "license" ++ ], ++ "parameters": { ++ "license": { ++ "description": "Name of the license resource to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{license}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ "flatPath": "projects/{project}/global/licenses/{license}", ++ "httpMethod": "GET", ++ "id": "compute.licenses.get", ++ "parameterOrder": [ ++ "project", ++ "license" ++ ], ++ "parameters": { ++ "license": { ++ "description": "Name of the License resource to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{license}", ++ "response": { ++ "$ref": "License" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.licenses.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses", ++ "httpMethod": "POST", ++ "id": "compute.licenses.insert", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses", ++ "request": { ++ "$ref": "License" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/devstorage.full_control", ++ "https://www.googleapis.com/auth/devstorage.read_only", ++ "https://www.googleapis.com/auth/devstorage.read_write" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses", ++ "httpMethod": "GET", ++ "id": "compute.licenses.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/licenses", ++ "response": { ++ "$ref": "LicensesListResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.licenses.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "GlobalSetPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.licenses.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "machineImages": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", ++ "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ "httpMethod": "DELETE", ++ "id": "compute.machineImages.delete", ++ "parameterOrder": [ ++ "project", ++ "machineImage" ++ ], ++ "parameters": { ++ "machineImage": { ++ "description": "The name of the machine image to delete.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +@@ -19994,6 +20915,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.regionBackendServices.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "backendService" ++ ], ++ "parameters": { ++ "backendService": { ++ "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", +@@ -20623,6 +21593,48 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "bulkInsert": { ++ "description": "Bulk create a set of disks.", ++ "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.bulkInsert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/disks/bulkInsert", ++ "request": { ++ "$ref": "BulkInsertDiskResource" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "createSnapshot": { + "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", +@@ -21116,17 +22128,24 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "startAsyncReplication": { ++ "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "httpMethod": "POST", +- "id": "compute.regionDisks.testIamPermissions", ++ "id": "compute.regionDisks.startAsyncReplication", + "parameterOrder": [ + "project", + "region", +- "resource" ++ "disk" + ], + "parameters": { ++ "disk": { ++ "description": "The name of the persistent disk.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21141,32 +22160,29 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", +- "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- "required": true, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "request": { +- "$ref": "TestPermissionsRequest" ++ "$ref": "RegionDisksStartAsyncReplicationRequest" + }, + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "update": { +- "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", +- "flatPath": "projects/{project}/regions/{region}/disks/{disk}", +- "httpMethod": "PATCH", +- "id": "compute.regionDisks.update", ++ "stopAsyncReplication": { ++ "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.stopAsyncReplication", + "parameterOrder": [ + "project", + "region", +@@ -21174,17 +22190,12 @@ + ], + "parameters": { + "disk": { +- "description": "The disk name for this request.", ++ "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "paths": { +- "location": "query", +- "repeated": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21203,18 +22214,9 @@ + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" +- }, +- "updateMask": { +- "description": "update_mask indicates fields to be updated as part of this request.", +- "format": "google-fieldmask", +- "location": "query", +- "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/disks/{disk}", +- "request": { +- "$ref": "Disk" +- }, ++ "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", + "response": { + "$ref": "Operation" + }, +@@ -21222,28 +22224,17 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- } +- } +- }, +- "regionHealthCheckServices": { +- "methods": { +- "delete": { +- "description": "Deletes the specified regional HealthCheckService.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "DELETE", +- "id": "compute.regionHealthCheckServices.delete", ++ }, ++ "stopGroupAsyncReplication": { ++ "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.stopGroupAsyncReplication", + "parameterOrder": [ + "project", +- "region", +- "healthCheckService" ++ "region" + ], + "parameters": { +- "healthCheckService": { +- "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", +- "location": "path", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21252,7 +22243,7 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request. 
This must be the region of the primary or secondary disks in the consistency group.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, +@@ -21264,7 +22255,10 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ "request": { ++ "$ref": "DisksStopGroupAsyncReplicationResource" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -21273,23 +22267,17 @@ + "https://www.googleapis.com/auth/compute" + ] + }, +- "get": { +- "description": "Returns the specified regional HealthCheckService resource.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "GET", +- "id": "compute.regionHealthCheckServices.get", ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.regionDisks.testIamPermissions", + "parameterOrder": [ + "project", + "region", +- "healthCheckService" ++ "resource" + ], + "parameters": { +- "healthCheckService": { +- "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", +- "location": "path", +- "required": true, +- "type": "string" +- }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21298,16 +22286,26 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "path": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, + "response": { +- "$ref": "HealthCheckService" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -21315,16 +22313,29 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "insert": { +- "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices", +- "httpMethod": "POST", +- "id": "compute.regionHealthCheckServices.insert", ++ "update": { ++ "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: user_license.", ++ "flatPath": "projects/{project}/regions/{region}/disks/{disk}", ++ "httpMethod": "PATCH", ++ "id": "compute.regionDisks.update", + "parameterOrder": [ + "project", +- "region" ++ "region", ++ "disk" + ], + "parameters": { ++ "disk": { ++ "description": "The disk name for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "paths": { ++ "location": "query", ++ "repeated": true, ++ "type": "string" ++ }, + "project": { + "description": "Project ID for this request.", + "location": "path", +@@ -21333,7 +22344,7 @@ + "type": "string" + }, + "region": { +- "description": "Name of the region scoping this request.", ++ "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, +@@ -21343,11 +22354,17 @@ + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" ++ }, ++ "updateMask": { ++ "description": "update_mask indicates fields to be updated as part of this request.", ++ "format": "google-fieldmask", ++ "location": "query", ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "path": "projects/{project}/regions/{region}/disks/{disk}", + "request": { +- "$ref": "HealthCheckService" ++ "$ref": "Disk" + }, + "response": { + "$ref": "Operation" +@@ -21356,38 +22373,26 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] +- }, +- "list": { +- "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", +- "flatPath": "projects/{project}/regions/{region}/healthCheckServices", +- "httpMethod": "GET", +- "id": "compute.regionHealthCheckServices.list", ++ } ++ } ++ }, ++ "regionHealthCheckServices": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified regional HealthCheckService.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "httpMethod": "DELETE", ++ "id": "compute.regionHealthCheckServices.delete", + "parameterOrder": [ + "project", +- "region" ++ "region", ++ "healthCheckService" + ], + "parameters": { +- "filter": { +- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- "location": "query", +- "type": "string" +- }, +- "maxResults": { +- "default": "500", +- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- "format": "uint32", +- "location": "query", +- "minimum": "0", +- "type": "integer" +- }, +- "orderBy": { +- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- "location": "query", +- "type": "string" +- }, +- "pageToken": { +- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- "location": "query", ++ "healthCheckService": { ++ "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", ++ "location": "path", ++ "required": true, + "type": "string" + }, + "project": { +@@ -21404,27 +22409,173 @@ + "required": true, + "type": "string" + }, +- "returnPartialSuccess": { +- "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", +- "type": "boolean" ++ "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { +- "$ref": "HealthCheckServicesList" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] + }, +- "patch": { +- "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "get": { ++ "description": "Returns the specified regional HealthCheckService resource.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", +- "httpMethod": "PATCH", +- "id": "compute.regionHealthCheckServices.patch", ++ "httpMethod": "GET", ++ "id": "compute.regionHealthCheckServices.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "healthCheckService" ++ ], ++ "parameters": { ++ "healthCheckService": { ++ "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "response": { ++ "$ref": "HealthCheckService" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices", ++ "httpMethod": "POST", ++ "id": "compute.regionHealthCheckServices.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "request": { ++ "$ref": "HealthCheckService" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices", ++ "httpMethod": "GET", ++ "id": "compute.regionHealthCheckServices.list", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/healthCheckServices", ++ "response": { ++ "$ref": "HealthCheckServicesList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "patch": { ++ "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", ++ "httpMethod": "PATCH", ++ "id": "compute.regionHealthCheckServices.patch", + "parameterOrder": [ + "project", + "region", +@@ -22737,6 +23888,7 @@ + ] + }, + "setAutoHealingPolicies": { ++ "deprecated": true, + "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", + "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", +@@ -23542,22 +24694,23 @@ + } + } + }, +- "regionNetworkEndpointGroups": { ++ "regionInstantSnapshots": { + "methods": { + "delete": { +- "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "httpMethod": "DELETE", +- "id": "compute.regionNetworkEndpointGroups.delete", ++ "id": "compute.regionInstantSnapshots.delete", + "parameterOrder": [ + "project", + "region", +- "networkEndpointGroup" ++ "instantSnapshot" + ], + "parameters": { +- "networkEndpointGroup": { +- "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to delete.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -23569,8 +24722,9 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "description": "The name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, +@@ -23580,7 +24734,57 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "export": { ++ "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.export", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "instantSnapshot" ++ ], ++ "parameters": { ++ "instantSnapshot": { ++ "description": "Name of the instant snapshot to export.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ "request": { ++ "$ref": "RegionInstantSnapshotsExportRequest" ++ }, + "response": { + "$ref": "Operation" + }, +@@ -23590,19 +24794,20 @@ + ] + }, + "get": { +- "description": "Returns the specified network endpoint group.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "description": "Returns the specified InstantSnapshot resource in the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "httpMethod": "GET", +- "id": "compute.regionNetworkEndpointGroups.get", ++ "id": "compute.regionInstantSnapshots.get", + "parameterOrder": [ + "project", + "region", +- "networkEndpointGroup" ++ "instantSnapshot" + ], + "parameters": { +- "networkEndpointGroup": { +- "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ "instantSnapshot": { ++ "description": "Name of the InstantSnapshot resource to return.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +@@ -23614,15 +24819,65 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "description": "The name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + "response": { +- "$ref": "NetworkEndpointGroup" ++ "$ref": "InstantSnapshot" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "getIamPolicy": { ++ "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ "httpMethod": "GET", ++ "id": "compute.regionInstantSnapshots.getIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "optionsRequestedPolicyVersion": { ++ "description": "Requested IAM Policy version.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ "response": { ++ "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +@@ -23631,10 +24886,10 @@ + ] + }, + "insert": { +- "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "description": "Creates an instant snapshot in the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + "httpMethod": "POST", +- "id": "compute.regionNetworkEndpointGroups.insert", ++ "id": "compute.regionInstantSnapshots.insert", + "parameterOrder": [ + "project", + "region" +@@ -23648,8 +24903,9 @@ + "type": "string" + }, + "region": { +- "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", ++ "description": "Name of the region for this request.", + "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, +@@ -23659,9 +24915,9 @@ + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "path": "projects/{project}/regions/{region}/instantSnapshots", + "request": { +- "$ref": "NetworkEndpointGroup" ++ "$ref": "InstantSnapshot" + }, + "response": { + "$ref": "Operation" +@@ -23672,10 +24928,345 @@ + ] + }, + "list": { +- "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", +- "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "description": "Retrieves the list of InstantSnapshot resources contained within the specified region.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + "httpMethod": "GET", +- "id": "compute.regionNetworkEndpointGroups.list", ++ "id": "compute.regionInstantSnapshots.list", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots", ++ "response": { ++ "$ref": "InstantSnapshotList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "setIamPolicy": { ++ "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.setIamPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ "request": { ++ "$ref": "RegionSetPolicyRequest" ++ }, ++ "response": { ++ "$ref": "Policy" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "setLabels": { ++ "description": "Sets the labels on a instantSnapshot in the given region. To learn more about labels, read the Labeling Resources documentation.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.setLabels", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", ++ "request": { ++ "$ref": "RegionSetLabelsRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ "httpMethod": "POST", ++ "id": "compute.regionInstantSnapshots.testIamPermissions", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "resource" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "resource": { ++ "description": "Name or id of the resource for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ "request": { ++ "$ref": "TestPermissionsRequest" ++ }, ++ "response": { ++ "$ref": "TestPermissionsResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, ++ "regionNetworkEndpointGroups": { ++ "methods": { ++ "delete": { ++ "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "httpMethod": "DELETE", ++ "id": "compute.regionNetworkEndpointGroups.delete", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkEndpointGroup" ++ ], ++ "parameters": { ++ "networkEndpointGroup": { ++ "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "get": { ++ "description": "Returns the specified network endpoint group.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "httpMethod": "GET", ++ "id": "compute.regionNetworkEndpointGroups.get", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "networkEndpointGroup" ++ ], ++ "parameters": { ++ "networkEndpointGroup": { ++ "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", ++ "response": { ++ "$ref": "NetworkEndpointGroup" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "insert": { ++ "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "httpMethod": "POST", ++ "id": "compute.regionNetworkEndpointGroups.insert", ++ "parameterOrder": [ ++ "project", ++ "region" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "request": { ++ "$ref": "NetworkEndpointGroup" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", ++ "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", ++ "httpMethod": "GET", ++ "id": "compute.regionNetworkEndpointGroups.list", + "parameterOrder": [ + "project", + "region" +@@ -25032,6 +26623,56 @@ + }, + "regionSecurityPolicies": { + "methods": { ++ "addRule": { ++ "description": "Inserts a rule into a security policy.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.addRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ "request": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "delete": { + "description": "Deletes the specified policy.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", +@@ -25122,6 +26763,55 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "getRule": { ++ "description": "Gets a rule at the specified priority.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ "httpMethod": "GET", ++ "id": "compute.regionSecurityPolicies.getRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to get from the security policy.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to which the queried rule belongs.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ "response": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, + "insert": { + "description": "Creates a new policy in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies", +@@ -25281,6 +26971,110 @@ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] ++ }, ++ "patchRule": { ++ "description": "Patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.patchRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to patch.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "validateOnly": { ++ "description": "If true, the request will not be committed.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ "request": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "removeRule": { ++ "description": "Deletes a rule at the specified priority.", ++ "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ "httpMethod": "POST", ++ "id": "compute.regionSecurityPolicies.removeRule", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "securityPolicy" ++ ], ++ "parameters": { ++ "priority": { ++ "description": "The priority of the rule to remove from the security policy.", ++ "format": "int32", ++ "location": "query", ++ "type": "integer" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ 
"description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "securityPolicy": { ++ "description": "Name of the security policy to update.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] + } + } + }, +@@ -33047,15 +34841,15 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, +- "testIamPermissions": { +- "description": "Returns permissions that a caller has on the specified resource.", +- "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "httpMethod": "POST", +- "id": "compute.targetInstances.testIamPermissions", ++ "id": "compute.targetInstances.setSecurityPolicy", + "parameterOrder": [ + "project", + "zone", +- "resource" ++ "targetInstance" + ], + "parameters": { + "project": { +@@ -33065,47 +34859,46 @@ + "required": true, + "type": "string" + }, +- "resource": { +- "description": "Name or id of the resource for this request.", ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetInstance": { ++ "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { +- "description": "The name of the zone for this request.", ++ "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", ++ "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "request": { +- "$ref": "TestPermissionsRequest" ++ "$ref": "SecurityPolicyReference" + }, + "response": { +- "$ref": "TestPermissionsResponse" ++ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute", +- "https://www.googleapis.com/auth/compute.readonly" ++ "https://www.googleapis.com/auth/compute" + ] +- } +- } +- }, +- "targetPools": { +- "methods": { +- "addHealthCheck": { +- "description": "Adds health check URLs to a target pool.", +- "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ }, ++ "testIamPermissions": { ++ "description": "Returns permissions that a caller has on the specified resource.", ++ "flatPath": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "httpMethod": "POST", +- "id": "compute.targetPools.addHealthCheck", ++ "id": "compute.targetInstances.testIamPermissions", + "parameterOrder": [ + "project", +- "region", +- "targetPool" ++ "zone", ++ "resource" + ], + "parameters": { + "project": { +@@ -33115,43 +34908,93 @@ + "required": true, + "type": "string" + }, +- "region": { +- "description": "Name of the region scoping this request.", ++ "resource": { ++ "description": "Name or id of the resource for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, +- "requestId": { +- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- "location": "query", +- "type": "string" +- }, +- "targetPool": { +- "description": "Name of the target pool to add a health check to.", ++ "zone": { ++ "description": "The name of the zone for this request.", + "location": "path", +- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, +- "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ "path": "projects/{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "request": { +- "$ref": "TargetPoolsAddHealthCheckRequest" ++ "$ref": "TestPermissionsRequest" + }, + "response": { +- "$ref": "Operation" ++ "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", +- "https://www.googleapis.com/auth/compute" ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" + ] +- }, +- "addInstance": { +- "description": "Adds an instance to a target pool.", +- "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", ++ } ++ } ++ }, ++ "targetPools": { ++ "methods": { ++ "addHealthCheck": { ++ "description": "Adds health check URLs to a target pool.", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", +- "id": "compute.targetPools.addInstance", ++ "id": "compute.targetPools.addHealthCheck", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the target pool to add a health check to.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", ++ "request": { ++ "$ref": "TargetPoolsAddHealthCheckRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, ++ "addInstance": { ++ "description": "Adds an instance to a target pool.", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.addInstance", + "parameterOrder": [ + "project", + "region", +@@ -33654,6 +35497,55 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "setSecurityPolicy": { ++ "description": "Sets the Google Cloud Armor security policy for the specified target pool. For more information, see Google Cloud Armor Overview", ++ "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "httpMethod": "POST", ++ "id": "compute.targetPools.setSecurityPolicy", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "targetPool" ++ ], ++ "parameters": { ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "region": { ++ "description": "Name of the region scoping this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "targetPool": { ++ "description": "Name of the TargetPool resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ "location": "path", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ "request": { ++ "$ref": "SecurityPolicyReference" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/targetPools/{resource}/testIamPermissions", +@@ -36287,7 +38179,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AWSV4Signature": { +@@ -36730,11 +38622,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -36744,11 +38636,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -36771,13 +38663,16 @@ + "description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. 
If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this access config.", ++ "type": "string" ++ }, + "setPublicPtr": { + "description": "Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.", + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -37575,6 +39470,18 @@ + ], + "type": "string" + }, ++ "savedState": { ++ "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", ++ "enum": [ ++ "DISK_SAVED_STATE_UNSPECIFIED", ++ "PRESERVED" ++ ], ++ "enumDescriptions": [ ++ "*[Default]* Disk state has not been preserved.", ++ "Disk state has been preserved." ++ ], ++ "type": "string" ++ }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" +@@ -37689,6 +39596,13 @@ + "format": "int64", + "type": "string" + }, ++ "replicaZones": { ++ "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" +@@ -38308,7 +40222,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. 
To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -38338,7 +40252,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -39068,6 +40982,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -40117,6 +42038,17 @@ + }, + "type": "object" + }, ++ "BulkInsertDiskResource": { ++ "description": "A transient resource used in compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is only used to process requests and is not persisted.", ++ "id": "BulkInsertDiskResource", ++ "properties": { ++ "sourceConsistencyGroupPolicy": { ++ "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to clone. This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "BulkInsertInstanceResource": { + "description": "A transient resource used in compute.instances.bulkInsert and compute.regionInstances.bulkInsert . This resource is not persisted anywhere, it is used only for processing the requests.", + "id": "BulkInsertInstanceResource", +@@ -40161,6 +42093,10 @@ + "description": "Per-instance properties to be set on individual instances. To be extended in the future.", + "id": "BulkInsertInstanceResourcePerInstanceProperties", + "properties": { ++ "hostname": { ++ "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", ++ "type": "string" ++ }, + "name": { + "description": "This field is only temporary. 
It will be removed. Do not use it.", + "type": "string" +@@ -40376,7 +42312,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -40417,6 +42353,7 @@ + "GENERAL_PURPOSE_N2", + "GENERAL_PURPOSE_N2D", + "GENERAL_PURPOSE_T2D", ++ "GRAPHICS_OPTIMIZED", + "MEMORY_OPTIMIZED", + "MEMORY_OPTIMIZED_M3", + "TYPE_UNSPECIFIED" +@@ -40433,6 +42370,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -41128,6 +43066,17 @@ + ], + "type": "string" + }, ++ "asyncPrimaryDisk": { ++ "$ref": "DiskAsyncReplication", ++ "description": "Disk asynchronously replicated into this disk." ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskAsyncReplicationList" ++ }, ++ "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", ++ "type": "object" ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -41273,6 +43222,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "DiskResourceStatus", ++ "description": "[Output Only] Status information for the disk resource." ++ }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" +@@ -41286,6 +43239,14 @@ + "format": "int64", + "type": "string" + }, ++ "sourceConsistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, ++ "sourceConsistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" +@@ -41306,6 +43267,14 @@ + "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "type": "string" + }, ++ "sourceInstantSnapshot": { ++ "description": "The source instant snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instantSnapshots/instantSnapshot - projects/project/zones/zone/instantSnapshots/instantSnapshot - zones/zone/instantSnapshots/instantSnapshot ", ++ "type": "string" ++ }, ++ "sourceInstantSnapshotId": { ++ "description": "[Output Only] The unique ID of the instant snapshot used to create this disk. This value identifies the exact instant snapshot that was used to create this persistent disk. 
For example, if you created the persistent disk from an instant snapshot that was later deleted and recreated under the same name, the source instant snapshot ID would identify the exact version of the instant snapshot that was used.", ++ "type": "string" ++ }, + "sourceSnapshot": { + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project /global/snapshots/snapshot - projects/project/global/snapshots/snapshot - global/snapshots/snapshot ", + "type": "string" +@@ -41504,6 +43473,37 @@ + }, + "type": "object" + }, ++ "DiskAsyncReplication": { ++ "id": "DiskAsyncReplication", ++ "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "disk": { ++ "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", ++ "type": "string" ++ }, ++ "diskId": { ++ "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskAsyncReplicationList": { ++ "id": "DiskAsyncReplicationList", ++ "properties": { ++ "asyncReplicationDisk": { ++ "$ref": "DiskAsyncReplication" ++ } ++ }, ++ "type": "object" ++ }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", +@@ -41693,6 +43693,47 @@ + }, + "type": "object" + }, ++ "DiskResourceStatus": { ++ "id": "DiskResourceStatus", ++ "properties": { ++ "asyncPrimaryDisk": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "description": "Key: disk, value: AsyncReplicationStatus message", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskResourceStatusAsyncReplicationStatus": { ++ "id": "DiskResourceStatusAsyncReplicationStatus", ++ "properties": { ++ "state": { ++ "enum": [ ++ "ACTIVE", ++ "CREATED", ++ "STARTING", ++ "STATE_UNSPECIFIED", ++ "STOPPED", ++ "STOPPING" ++ ], ++ "enumDescriptions": [ ++ "Replication is active.", ++ "Secondary disk is created and is waiting for replication to start.", ++ "Replication is starting.", ++ "", ++ "Replication is stopped.", ++ "Replication is stopping." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DiskType": { + "description": "Represents a Disk Type resource. 
Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/beta/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/beta/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", + "id": "DiskType", +@@ -42236,6 +44277,27 @@ + }, + "type": "object" + }, ++ "DisksStartAsyncReplicationRequest": { ++ "id": "DisksStartAsyncReplicationRequest", ++ "properties": { ++ "asyncSecondaryDisk": { ++ "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DisksStopGroupAsyncReplicationResource": { ++ "description": "A transient resource used in compute.disks.stopGroupAsyncReplication and compute.regionDisks.stopGroupAsyncReplication. It is only used to process requests and is not persisted.", ++ "id": "DisksStopGroupAsyncReplicationResource", ++ "properties": { ++ "resourcePolicy": { ++ "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to stop. This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DisplayDevice": { + "description": "A set of Display Device options", + "id": "DisplayDevice", +@@ -42594,6 +44656,10 @@ + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine.", + "type": "string" ++ }, ++ "ipv6Address": { ++ "description": "IPv6 address of the interface in the external VPN gateway. This IPv6 address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. Must specify an IPv6 address (not IPV4-mapped) using any format described in RFC 4291 (e.g. 2001:db8:0:0:2d9:51:0:0). The output format is RFC 5952 format (e.g. 2001:db8::2d9:51:0:0).", ++ "type": "string" + } + }, + "type": "object" +@@ -43337,6 +45403,10 @@ + "format": "int32", + "type": "integer" + }, ++ "securityProfileGroup": { ++ "description": "A fully-qualified URL of a SecurityProfile resource instance. Example: https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group Must be specified if action = 'apply_security_profile_group' and cannot be specified for other actions.", ++ "type": "string" ++ }, + "targetResources": { + "description": "A list of network resource URLs to which this rule applies. 
This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + "items": { +@@ -43357,6 +45427,10 @@ + "type": "string" + }, + "type": "array" ++ }, ++ "tlsInspect": { ++ "description": "Boolean flag indicating if the traffic should be TLS decrypted. Can be set only if action = 'apply_security_profile_group' and cannot be set for other actions.", ++ "type": "boolean" + } + }, + "type": "object" +@@ -43650,7 +45724,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -44160,6 +46234,20 @@ + }, + "type": "object" + }, ++ "GlobalAddressesMoveRequest": { ++ "id": "GlobalAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { +@@ -44317,13 +46405,14 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "SEV_CAPABLE", ++ "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", +@@ -44338,6 +46427,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -44505,7 +46595,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/beta/healthChecks) * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/beta/healthChecks) * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -45212,7 +47302,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -45285,10 +47375,26 @@ + "UNKNOWN" + ], + "enumDescriptions": [ +- "", +- "", +- "", +- "" ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." ++ ], ++ "type": "string" ++ }, ++ "ipv6HealthState": { ++ "description": "Health state of the ipv6 network endpoint determined based on the health checks configured.", ++ "enum": [ ++ "DRAINING", ++ "HEALTHY", ++ "UNHEALTHY", ++ "UNKNOWN" ++ ], ++ "enumDescriptions": [ ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." 
+ ], + "type": "string" + } +@@ -46759,7 +48865,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -47655,7 +49761,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "format": "int32", + "type": "integer" + } +@@ -48064,7 +50170,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -49647,193 +51753,819 @@ + }, + "type": "object" + }, +- "InstanceWithNamedPorts": { +- "id": "InstanceWithNamedPorts", +- "properties": { +- "instance": { +- "description": "[Output Only] The URL of the instance.", +- "type": "string" +- }, +- "namedPorts": { +- "description": "[Output Only] The named ports that belong to this instance group.", +- "items": { +- "$ref": "NamedPort" +- }, +- "type": "array" +- }, +- "status": { +- "description": "[Output Only] The status of the instance.", +- "enum": [ +- "DEPROVISIONING", +- "PROVISIONING", +- "REPAIRING", +- "RUNNING", +- "STAGING", +- "STOPPED", +- "STOPPING", +- "SUSPENDED", +- "SUSPENDING", +- "TERMINATED" +- ], +- "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", +- "Resources are being allocated for the instance.", +- "The instance is in repair.", +- "The instance is running.", +- "All required resources have been allocated and the instance is being started.", +- "The instance has stopped successfully.", +- "The instance is currently stopping (either being deleted or killed).", +- "The instance has suspended.", +- "The instance is suspending.", +- "The instance has stopped (either by explicit action or underlying failure)." 
+- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesAddResourcePoliciesRequest": { +- "id": "InstancesAddResourcePoliciesRequest", +- "properties": { +- "resourcePolicies": { +- "description": "Resource policies to be added to this instance.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponse": { +- "id": "InstancesGetEffectiveFirewallsResponse", +- "properties": { +- "firewallPolicys": { +- "description": "Effective firewalls from firewall policies.", +- "items": { +- "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" +- }, +- "type": "array" +- }, +- "firewalls": { +- "description": "Effective firewalls on the instance.", +- "items": { +- "$ref": "Firewall" +- }, +- "type": "array" +- }, +- "organizationFirewalls": { +- "description": "Effective firewalls from organization policies.", +- "items": { +- "$ref": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { +- "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", +- "properties": { +- "displayName": { +- "description": "[Output Only] Deprecated, please use short name instead. The display name of the firewall policy.", +- "type": "string" +- }, +- "name": { +- "description": "[Output Only] The name of the firewall policy.", +- "type": "string" +- }, +- "rules": { +- "description": "The rules that apply to the network.", +- "items": { +- "$ref": "FirewallPolicyRule" +- }, +- "type": "array" +- }, +- "shortName": { +- "description": "[Output Only] The short name of the firewall policy.", +- "type": "string" +- }, +- "type": { +- "description": "[Output Only] The type of the firewall policy. 
Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", +- "enum": [ +- "HIERARCHY", +- "NETWORK", +- "NETWORK_REGIONAL", +- "UNSPECIFIED" +- ], +- "enumDescriptions": [ +- "", +- "", +- "", +- "" +- ], +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { +- "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", +- "id": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy", ++ "InstanceWithNamedPorts": { ++ "id": "InstanceWithNamedPorts", ++ "properties": { ++ "instance": { ++ "description": "[Output Only] The URL of the instance.", ++ "type": "string" ++ }, ++ "namedPorts": { ++ "description": "[Output Only] The named ports that belong to this instance group.", ++ "items": { ++ "$ref": "NamedPort" ++ }, ++ "type": "array" ++ }, ++ "status": { ++ "description": "[Output Only] The status of the instance.", ++ "enum": [ ++ "DEPROVISIONING", ++ "PROVISIONING", ++ "REPAIRING", ++ "RUNNING", ++ "STAGING", ++ "STOPPED", ++ "STOPPING", ++ "SUSPENDED", ++ "SUSPENDING", ++ "TERMINATED" ++ ], ++ "enumDescriptions": [ ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "Resources are being allocated for the instance.", ++ "The instance is in repair.", ++ "The instance is running.", ++ "All required resources have been allocated and the instance is being started.", ++ "The instance has stopped successfully.", ++ "The instance is currently stopping (either being deleted or killed).", ++ "The instance has suspended.", ++ "The instance is suspending.", ++ "The instance has stopped (either by explicit action or underlying failure)." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesAddResourcePoliciesRequest": { ++ "id": "InstancesAddResourcePoliciesRequest", ++ "properties": { ++ "resourcePolicies": { ++ "description": "Resource policies to be added to this instance.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponse": { ++ "id": "InstancesGetEffectiveFirewallsResponse", ++ "properties": { ++ "firewallPolicys": { ++ "description": "Effective firewalls from firewall policies.", ++ "items": { ++ "$ref": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy" ++ }, ++ "type": "array" ++ }, ++ "firewalls": { ++ "description": "Effective firewalls on the instance.", ++ "items": { ++ "$ref": "Firewall" ++ }, ++ "type": "array" ++ }, ++ "organizationFirewalls": { ++ "description": "Effective firewalls from organization policies.", ++ "items": { ++ "$ref": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy": { ++ "id": "InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy", ++ "properties": { ++ "displayName": { ++ "description": "[Output Only] Deprecated, please use short name instead. 
The display name of the firewall policy.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "[Output Only] The name of the firewall policy.", ++ "type": "string" ++ }, ++ "rules": { ++ "description": "The rules that apply to the network.", ++ "items": { ++ "$ref": "FirewallPolicyRule" ++ }, ++ "type": "array" ++ }, ++ "shortName": { ++ "description": "[Output Only] The short name of the firewall policy.", ++ "type": "string" ++ }, ++ "type": { ++ "description": "[Output Only] The type of the firewall policy. Can be one of HIERARCHY, NETWORK, NETWORK_REGIONAL.", ++ "enum": [ ++ "HIERARCHY", ++ "NETWORK", ++ "NETWORK_REGIONAL", ++ "UNSPECIFIED" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { ++ "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", ++ "id": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy", ++ "properties": { ++ "id": { ++ "description": "The unique identifier for the security policy. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "rules": { ++ "description": "The rules that apply to the network.", ++ "items": { ++ "$ref": "SecurityPolicyRule" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesRemoveResourcePoliciesRequest": { ++ "id": "InstancesRemoveResourcePoliciesRequest", ++ "properties": { ++ "resourcePolicies": { ++ "description": "Resource policies to be removed from this instance.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesResumeRequest": { ++ "id": "InstancesResumeRequest", ++ "properties": { ++ "disks": { ++ "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to resume the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", ++ "items": { ++ "$ref": "CustomerEncryptionKeyProtectedDisk" ++ }, ++ "type": "array" ++ }, ++ "instanceEncryptionKey": { ++ "$ref": "CustomerEncryptionKey", ++ "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key. If the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesScopedList": { ++ "id": "InstancesScopedList", ++ "properties": { ++ "instances": { ++ "description": "[Output Only] A list of instances contained in this scope.", ++ "items": { ++ "$ref": "Instance" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetLabelsRequest": { ++ "id": "InstancesSetLabelsRequest", ++ "properties": { ++ "labelFingerprint": { ++ "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMachineResourcesRequest": { ++ "id": "InstancesSetMachineResourcesRequest", ++ "properties": { ++ "guestAccelerators": { ++ "description": "A list of the type and count of accelerator cards attached to the instance.", ++ "items": { ++ "$ref": "AcceleratorConfig" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMachineTypeRequest": { ++ "id": "InstancesSetMachineTypeRequest", ++ "properties": { ++ "machineType": { ++ "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetMinCpuPlatformRequest": { ++ "id": "InstancesSetMinCpuPlatformRequest", ++ "properties": { ++ "minCpuPlatform": { ++ "description": "Minimum cpu/platform this instance should be started at.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetNameRequest": { ++ "id": "InstancesSetNameRequest", ++ "properties": { ++ "currentName": { ++ "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetSecurityPolicyRequest": { ++ "id": "InstancesSetSecurityPolicyRequest", ++ "properties": { ++ "networkInterfaces": { ++ "description": "The network interfaces that the security policy will be applied to. Network interfaces use the nicN naming format. You can only set a security policy for network interfaces with an access config.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "securityPolicy": { ++ "description": "A full or partial URL to a security policy to add to this instance. 
If this field is set to an empty string it will remove the associated security policy.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesSetServiceAccountRequest": { ++ "id": "InstancesSetServiceAccountRequest", ++ "properties": { ++ "email": { ++ "description": "Email address of the service account.", ++ "type": "string" ++ }, ++ "scopes": { ++ "description": "The list of scopes to be made available for this service account.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstancesStartWithEncryptionKeyRequest": { ++ "id": "InstancesStartWithEncryptionKeyRequest", ++ "properties": { ++ "disks": { ++ "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", ++ "items": { ++ "$ref": "CustomerEncryptionKeyProtectedDisk" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshot": { ++ "description": "Represents a InstantSnapshot resource. You can use instant snapshots to create disk rollback points quickly..", ++ "id": "InstantSnapshot", ++ "properties": { ++ "architecture": { ++ "description": "[Output Only] The architecture of the instant snapshot. Valid values are ARM64 or X86_64.", ++ "enum": [ ++ "ARCHITECTURE_UNSPECIFIED", ++ "ARM64", ++ "X86_64" ++ ], ++ "enumDescriptions": [ ++ "Default value indicating Architecture is not set.", ++ "Machines with architecture ARM64", ++ "Machines with architecture X86_64" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "An optional description of this resource. Provide this property when you create the resource.", ++ "type": "string" ++ }, ++ "diskSizeGb": { ++ "description": "[Output Only] Size of the source disk, specified in GB.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshot", ++ "description": "[Output Only] Type of the resource. Always compute#instantSnapshot for InstantSnapshot resources.", ++ "type": "string" ++ }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this InstantSnapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a InstantSnapshot.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels to apply to this InstantSnapshot. These can be later modified by the setLabels method. Label values may be empty.", ++ "type": "object" ++ }, ++ "name": { ++ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "type": "string" ++ }, ++ "region": { ++ "description": "[Output Only] URL of the region where the instant snapshot resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ }, ++ "resourceStatus": { ++ "$ref": "InstantSnapshotResourceStatus", ++ "description": "[Output Only] Status information for the instant snapshot resource." ++ }, ++ "satisfiesPzs": { ++ "description": "[Output Only] Reserved for future use.", ++ "type": "boolean" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "selfLinkWithId": { ++ "description": "[Output Only] Server-defined URL for this resource's resource id.", ++ "type": "string" ++ }, ++ "sourceDisk": { ++ "description": "URL of the source disk used to create this instant snapshot. Note that the source disk must be in the same zone/region as the instant snapshot to be created. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ }, ++ "sourceDiskId": { ++ "description": "[Output Only] The ID value of the disk used to create this InstantSnapshot. This value may be used to determine whether the InstantSnapshot was taken from the current or a previous instance of a given disk name.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of the instantSnapshot. This can be CREATING, DELETING, FAILED, or READY.", ++ "enum": [ ++ "CREATING", ++ "DELETING", ++ "FAILED", ++ "READY" ++ ], ++ "enumDescriptions": [ ++ "InstantSnapshot creation is in progress.", ++ "InstantSnapshot is currently being deleted.", ++ "InstantSnapshot creation failed.", ++ "InstantSnapshot has been created successfully." ++ ], ++ "type": "string" ++ }, ++ "zone": { ++ "description": "[Output Only] URL of the zone where the instant snapshot resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotAggregatedList": { ++ "id": "InstantSnapshotAggregatedList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "additionalProperties": { ++ "$ref": "InstantSnapshotsScopedList", ++ "description": "[Output Only] Name of the scope containing this set of instantSnapshots." ++ }, ++ "description": "A list of InstantSnapshotsScopedList resources.", ++ "type": "object" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshotAggregatedList", ++ "description": "[Output Only] Type of resource. 
Always compute#instantSnapshotAggregatedList for aggregated lists of instantSnapshots.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "unreachables": { ++ "description": "[Output Only] Unreachable resources.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. 
We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotExportParams": { ++ "id": "InstantSnapshotExportParams", ++ "properties": { ++ "baseInstantSnapshot": { ++ "description": "An optional base instant snapshot that this resource is compared against. If not specified, all blocks of this resource are exported. The base instant snapshot and this resource must be created from the same disk. The base instant snapshot must be created earlier in time than this resource.", ++ "type": "string" ++ }, ++ "bucketName": { ++ "description": "The name of an existing bucket in Cloud Storage where the changed blocks will be stored. The Google Service Account must have read and write access to this bucket. The bucket has to be in the same region as this resource.", ++ "type": "string" ++ }, ++ "encryptionKey": { ++ "$ref": "CustomerEncryptionKey", ++ "description": "Encryption key used to encrypt the instant snapshot." ++ }, ++ "objectName": { ++ "description": "Name of the output Bigstore object storing the changed blocks. 
Object name must be less than 1024 bytes in length.", ++ "type": "string" ++ }, ++ "outputType": { ++ "description": "The format of the output file.", ++ "enum": [ ++ "INVALID", ++ "METADATA_AND_DATA", ++ "METADATA_ONLY" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotList": { ++ "description": "Contains a list of InstantSnapshot resources.", ++ "id": "InstantSnapshotList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InstantSnapshot resources.", ++ "items": { ++ "$ref": "InstantSnapshot" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#instantSnapshotList", ++ "description": "Type of resource.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. 
Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InstantSnapshotResourceStatus": { ++ "id": "InstantSnapshotResourceStatus", + "properties": { +- "id": { +- "description": "The unique identifier for the security policy. 
This identifier is defined by the server.", +- "format": "uint64", ++ "storageSizeBytes": { ++ "description": "[Output Only] The storage size of this instant snapshot.", ++ "format": "int64", + "type": "string" +- }, +- "rules": { +- "description": "The rules that apply to the network.", +- "items": { +- "$ref": "SecurityPolicyRule" +- }, +- "type": "array" + } + }, + "type": "object" + }, +- "InstancesRemoveResourcePoliciesRequest": { +- "id": "InstancesRemoveResourcePoliciesRequest", +- "properties": { +- "resourcePolicies": { +- "description": "Resource policies to be removed from this instance.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesResumeRequest": { +- "id": "InstancesResumeRequest", ++ "InstantSnapshotsExportRequest": { ++ "id": "InstantSnapshotsExportRequest", + "properties": { +- "disks": { +- "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to resume the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", +- "items": { +- "$ref": "CustomerEncryptionKeyProtectedDisk" +- }, +- "type": "array" +- }, +- "instanceEncryptionKey": { +- "$ref": "CustomerEncryptionKey", +- "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key. If the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." ++ "exportParams": { ++ "$ref": "InstantSnapshotExportParams", ++ "description": "Parameters to export the changed blocks." + } + }, + "type": "object" + }, +- "InstancesScopedList": { +- "id": "InstancesScopedList", ++ "InstantSnapshotsScopedList": { ++ "id": "InstantSnapshotsScopedList", + "properties": { +- "instances": { +- "description": "[Output Only] A list of instances contained in this scope.", ++ "instantSnapshots": { ++ "description": "[Output Only] A list of instantSnapshots contained in this scope.", + "items": { +- "$ref": "Instance" ++ "$ref": "InstantSnapshot" + }, + "type": "array" + }, + "warning": { +- "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", ++ "description": "[Output Only] Informational warning which replaces the list of instantSnapshots when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", +@@ -49924,100 +52656,6 @@ + }, + "type": "object" + }, +- "InstancesSetLabelsRequest": { +- "id": "InstancesSetLabelsRequest", +- "properties": { +- "labelFingerprint": { +- "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. 
Provide the latest fingerprint value when making a request to add or change labels.", +- "format": "byte", +- "type": "string" +- }, +- "labels": { +- "additionalProperties": { +- "type": "string" +- }, +- "type": "object" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMachineResourcesRequest": { +- "id": "InstancesSetMachineResourcesRequest", +- "properties": { +- "guestAccelerators": { +- "description": "A list of the type and count of accelerator cards attached to the instance.", +- "items": { +- "$ref": "AcceleratorConfig" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMachineTypeRequest": { +- "id": "InstancesSetMachineTypeRequest", +- "properties": { +- "machineType": { +- "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetMinCpuPlatformRequest": { +- "id": "InstancesSetMinCpuPlatformRequest", +- "properties": { +- "minCpuPlatform": { +- "description": "Minimum cpu/platform this instance should be started at.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetNameRequest": { +- "id": "InstancesSetNameRequest", +- "properties": { +- "currentName": { +- "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", +- "type": "string" +- }, +- "name": { +- "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InstancesSetServiceAccountRequest": { +- "id": "InstancesSetServiceAccountRequest", +- "properties": { +- "email": { +- "description": "Email address of the service account.", +- "type": "string" +- }, +- "scopes": { +- "description": "The list of scopes to be made available for this service account.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, +- "InstancesStartWithEncryptionKeyRequest": { +- "id": "InstancesStartWithEncryptionKeyRequest", +- "properties": { +- "disks": { +- "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key. In order to start the instance, the disk url and its corresponding key must be provided. If the disk is not protected with a customer-supplied encryption key it should not be specified.", +- "items": { +- "$ref": "CustomerEncryptionKeyProtectedDisk" +- }, +- "type": "array" +- } +- }, +- "type": "object" +- }, + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", +@@ -50036,7 +52674,7 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. 
For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { +@@ -50171,6 +52809,10 @@ + "format": "int32", + "type": "integer" + }, ++ "remoteLocation": { ++ "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", ++ "type": "string" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -50265,6 +52907,10 @@ + "description": "This field is not available.", + "type": "string" + }, ++ "configurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Constraints for this attachment, if any. The attachment does not work if these constraints are not met." ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -50330,7 +52976,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -50396,6 +53042,10 @@ + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, ++ "remoteService": { ++ "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: \"Amazon Web Services\" \"Microsoft Azure\". 
The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", ++ "type": "string" ++ }, + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "type": "string" +@@ -50442,6 +53092,11 @@ + ], + "type": "string" + }, ++ "subnetLength": { ++ "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", ++ "format": "int32", ++ "type": "integer" ++ }, + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", + "enum": [ +@@ -50591,6 +53246,47 @@ + }, + "type": "object" + }, ++ "InterconnectAttachmentConfigurationConstraints": { ++ "id": "InterconnectAttachmentConfigurationConstraints", ++ "properties": { ++ "bgpMd5": { ++ "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", ++ "enum": [ ++ "MD5_OPTIONAL", ++ "MD5_REQUIRED", ++ "MD5_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", ++ "MD5_REQUIRED: BGP MD5 authentication must be configured.", ++ "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" ++ ], ++ "type": "string" ++ }, ++ "bgpPeerAsnRanges": { ++ "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. 
Although the API accepts other ranges, these are the ranges that we recommend.", ++ "items": { ++ "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { ++ "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", ++ "properties": { ++ "max": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "uint32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", +@@ -51374,111 +54070,413 @@ + }, + "type": "object" + }, +- "InterconnectLocationRegionInfo": { +- "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", +- "id": "InterconnectLocationRegionInfo", ++ "InterconnectLocationRegionInfo": { ++ "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", ++ "id": "InterconnectLocationRegionInfo", ++ "properties": { ++ "expectedRttMs": { ++ "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "locationPresence": { ++ "description": "Identifies the network presence of this location.", ++ "enum": [ ++ "GLOBAL", ++ "LOCAL_REGION", ++ "LP_GLOBAL", ++ "LP_LOCAL_REGION" ++ ], ++ "enumDescriptions": [ ++ "This region is not in any common network presence with this InterconnectLocation.", ++ "This region shares the same regional network presence as this InterconnectLocation.", ++ "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", ++ "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." ++ ], ++ "type": "string" ++ }, ++ "region": { ++ "description": "URL for the region of this location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectOutageNotification": { ++ "description": "Description of a planned outage on this Interconnect.", ++ "id": "InterconnectOutageNotification", ++ "properties": { ++ "affectedCircuits": { ++ "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "description": { ++ "description": "A description about the purpose of the outage.", ++ "type": "string" ++ }, ++ "endTime": { ++ "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "issueType": { ++ "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. 
Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", ++ "enum": [ ++ "IT_OUTAGE", ++ "IT_PARTIAL_OUTAGE", ++ "OUTAGE", ++ "PARTIAL_OUTAGE" ++ ], ++ "enumDescriptions": [ ++ "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", ++ "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", ++ "The Interconnect may be completely out of service for some or all of the specified window.", ++ "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." ++ ], ++ "type": "string" ++ }, ++ "name": { ++ "description": "Unique identifier for this outage notification.", ++ "type": "string" ++ }, ++ "source": { ++ "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", ++ "enum": [ ++ "GOOGLE", ++ "NSRC_GOOGLE" ++ ], ++ "enumDescriptions": [ ++ "This notification was generated by Google.", ++ "[Deprecated] This notification was generated by Google." ++ ], ++ "type": "string" ++ }, ++ "startTime": { ++ "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", ++ "format": "int64", ++ "type": "string" ++ }, ++ "state": { ++ "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", ++ "enum": [ ++ "ACTIVE", ++ "CANCELLED", ++ "COMPLETED", ++ "NS_ACTIVE", ++ "NS_CANCELED" ++ ], ++ "enumDescriptions": [ ++ "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", ++ "The outage associated with this notification was cancelled before the outage was due to start.", ++ "The outage associated with this notification is complete.", ++ "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", ++ "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocation": { ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. 
You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "id": "InterconnectRemoteLocation", ++ "properties": { ++ "address": { ++ "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", ++ "type": "string" ++ }, ++ "attachmentConfigurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." ++ }, ++ "city": { ++ "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", ++ "type": "string" ++ }, ++ "constraints": { ++ "$ref": "InterconnectRemoteLocationConstraints", ++ "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments." ++ }, ++ "continent": { ++ "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", ++ "enum": [ ++ "AFRICA", ++ "ASIA_PAC", ++ "EUROPE", ++ "NORTH_AMERICA", ++ "SOUTH_AMERICA" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "[Output Only] An optional description of the resource.", ++ "type": "string" ++ }, ++ "facilityProvider": { ++ "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", ++ "type": "string" ++ }, ++ "facilityProviderFacilityId": { ++ "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocation", ++ "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", ++ "type": "string" ++ }, ++ "lacp": { ++ "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", ++ "enum": [ ++ "LACP_SUPPORTED", ++ "LACP_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ ], ++ "type": "string" ++ }, ++ "maxLagSize100Gbps": { ++ "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "maxLagSize10Gbps": { ++ "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). 
When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "name": { ++ "description": "[Output Only] Name of the resource.", ++ "type": "string" ++ }, ++ "peeringdbFacilityId": { ++ "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", ++ "type": "string" ++ }, ++ "permittedConnections": { ++ "description": "[Output Only] Permitted connections.", ++ "items": { ++ "$ref": "InterconnectRemoteLocationPermittedConnections" ++ }, ++ "type": "array" ++ }, ++ "remoteService": { ++ "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects. - AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", ++ "enum": [ ++ "AVAILABLE", ++ "CLOSED" ++ ], ++ "enumDescriptions": [ ++ "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", ++ "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraints": { ++ "id": "InterconnectRemoteLocationConstraints", ++ "properties": { ++ "portPairRemoteLocation": { ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", ++ "enum": [ ++ "PORT_PAIR_MATCHING_REMOTE_LOCATION", ++ "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", ++ "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." ++ ], ++ "type": "string" ++ }, ++ "portPairVlan": { ++ "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", ++ "enum": [ ++ "PORT_PAIR_MATCHING_VLAN", ++ "PORT_PAIR_UNCONSTRAINED_VLAN" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. 
While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", ++ "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." ++ ], ++ "type": "string" ++ }, ++ "subnetLengthRange": { ++ "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. " ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraintsSubnetLengthRange": { ++ "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "properties": { ++ "max": { ++ "format": "int32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationList": { ++ "description": "Response to the list request, and contains a list of interconnect remote locations.", ++ "id": "InterconnectRemoteLocationList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InterconnectRemoteLocation resources.", ++ "items": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocationList", ++ "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", + "properties": { +- "expectedRttMs": { +- "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", +- "format": "int64", +- "type": "string" +- }, +- "locationPresence": { +- "description": "Identifies the network presence of this location.", +- "enum": [ +- "GLOBAL", +- "LOCAL_REGION", +- "LP_GLOBAL", +- "LP_LOCAL_REGION" +- ], +- "enumDescriptions": [ +- "This region is not in any common network presence with this InterconnectLocation.", +- "This region shares the same regional network presence as this InterconnectLocation.", +- "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", +- "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." +- ], +- "type": "string" +- }, +- "region": { +- "description": "URL for the region of this location.", +- "type": "string" +- } +- }, +- "type": "object" +- }, +- "InterconnectOutageNotification": { +- "description": "Description of a planned outage on this Interconnect.", +- "id": "InterconnectOutageNotification", +- "properties": { +- "affectedCircuits": { +- "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", +- "items": { +- "type": "string" +- }, +- "type": "array" +- }, +- "description": { +- "description": "A description about the purpose of the outage.", +- "type": "string" +- }, +- "endTime": { +- "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", +- "format": "int64", +- "type": "string" +- }, +- "issueType": { +- "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", +- "enum": [ +- "IT_OUTAGE", +- "IT_PARTIAL_OUTAGE", +- "OUTAGE", +- "PARTIAL_OUTAGE" +- ], +- "enumDescriptions": [ +- "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", +- "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. 
The interconnect as a whole should remain up, albeit with reduced bandwidth.", +- "The Interconnect may be completely out of service for some or all of the specified window.", +- "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." +- ], +- "type": "string" +- }, +- "name": { +- "description": "Unique identifier for this outage notification.", +- "type": "string" +- }, +- "source": { +- "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", +- "enum": [ +- "GOOGLE", +- "NSRC_GOOGLE" +- ], +- "enumDescriptions": [ +- "This notification was generated by Google.", +- "[Deprecated] This notification was generated by Google." +- ], +- "type": "string" +- }, +- "startTime": { +- "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", +- "format": "int64", +- "type": "string" +- }, +- "state": { +- "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", +- "enum": [ +- "ACTIVE", +- "CANCELLED", +- "COMPLETED", +- "NS_ACTIVE", +- "NS_CANCELED" +- ], +- "enumDescriptions": [ +- "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", +- "The outage associated with this notification was cancelled before the outage was due to start.", +- "The outage associated with this notification is complete.", +- "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", +- "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." +- ], ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", + "type": "string" + } + }, +@@ -52221,7 +55219,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. 
nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -52717,7 +55715,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -52992,7 +55990,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -53099,7 +56097,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -53124,7 +56122,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -53288,7 +56286,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -53296,7 +56294,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -53855,6 +56853,10 @@ + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "type": "string" + }, ++ "ipv6Address": { ++ "description": "Optional IPv6 address of network endpoint.", ++ "type": "string" ++ }, + "port": { + "description": "Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used.", + "format": "int32", +@@ -54746,7 +57748,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. 
This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -59416,6 +62418,7 @@ + "COMMITTED_NVIDIA_A100_80GB_GPUS", + "COMMITTED_NVIDIA_A100_GPUS", + "COMMITTED_NVIDIA_K80_GPUS", ++ "COMMITTED_NVIDIA_L4_GPUS", + "COMMITTED_NVIDIA_P100_GPUS", + "COMMITTED_NVIDIA_P4_GPUS", + "COMMITTED_NVIDIA_T4_GPUS", +@@ -59467,11 +62470,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -59486,6 +62493,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -59510,6 +62518,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -59567,6 +62576,7 @@ + "", + "", + "", ++ "", + "Guest CPUs", + "", + "", +@@ -59659,6 +62669,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -59812,6 +62828,20 @@ + }, + "type": "object" + }, ++ "RegionAddressesMoveRequest": { ++ "id": "RegionAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. So /regions/region/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionAutoscalerList": { + "description": "Contains a list of autoscalers.", + "id": "RegionAutoscalerList", +@@ -60101,6 +63131,16 @@ + }, + "type": "object" + }, ++ "RegionDisksStartAsyncReplicationRequest": { ++ "id": "RegionDisksStartAsyncReplicationRequest", ++ "properties": { ++ "asyncSecondaryDisk": { ++ "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. 
For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionInstanceGroupList": { + "description": "Contains a list of InstanceGroup resources.", + "id": "RegionInstanceGroupList", +@@ -60845,6 +63885,16 @@ + }, + "type": "object" + }, ++ "RegionInstantSnapshotsExportRequest": { ++ "id": "RegionInstantSnapshotsExportRequest", ++ "properties": { ++ "exportParams": { ++ "$ref": "InstantSnapshotExportParams", ++ "description": "Parameters to export the changed blocks." ++ } ++ }, ++ "type": "object" ++ }, + "RegionList": { + "description": "Contains a list of region resources.", + "id": "RegionList", +@@ -61741,6 +64791,10 @@ + "description": { + "type": "string" + }, ++ "diskConsistencyGroupPolicy": { ++ "$ref": "ResourcePolicyDiskConsistencyGroupPolicy", ++ "description": "Resource policy for disk consistency groups." ++ }, + "groupPlacementPolicy": { + "$ref": "ResourcePolicyGroupPlacementPolicy", + "description": "Resource policy for instances for placement configuration." +@@ -61956,6 +65010,12 @@ + }, + "type": "object" + }, ++ "ResourcePolicyDiskConsistencyGroupPolicy": { ++ "description": "Resource policy for disk consistency groups.", ++ "id": "ResourcePolicyDiskConsistencyGroupPolicy", ++ "properties": {}, ++ "type": "object" ++ }, + "ResourcePolicyGroupPlacementPolicy": { + "description": "A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality", + "id": "ResourcePolicyGroupPlacementPolicy", +@@ -63095,6 +66155,18 @@ + "$ref": "RouterBgpPeerBfd", + "description": "BFD configuration for the BGP peering." + }, ++ "customLearnedIpRanges": { ++ "description": "A list of user-defined custom learned route IP address ranges for a BGP session.", ++ "items": { ++ "$ref": "RouterBgpPeerCustomLearnedIpRange" ++ }, ++ "type": "array" ++ }, ++ "customLearnedRoutePriority": { ++ "description": "The user-defined custom learned route priority for a BGP session. This value is applied to all custom learned route ranges for the session. You can choose a value from `0` to `65335`. If you don't provide a value, Google Cloud assigns a priority of `100` to the ranges.", ++ "format": "int32", ++ "type": "integer" ++ }, + "enable": { + "description": "The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE.", + "enum": [ +@@ -63209,6 +66281,16 @@ + }, + "type": "object" + }, ++ "RouterBgpPeerCustomLearnedIpRange": { ++ "id": "RouterBgpPeerCustomLearnedIpRange", ++ "properties": { ++ "range": { ++ "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, a `/32` singular IP address range, and, for IPv6, `/128`.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RouterInterface": { + "id": "RouterInterface", + "properties": { +@@ -63411,6 +66493,22 @@ + "description": "Represents a Nat resource. 
It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", + "id": "RouterNat", + "properties": { ++ "autoNetworkTier": { ++ "description": "The network tier to use when automatically reserving IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, PREMIUM tier will be used.", ++ "enum": [ ++ "FIXED_STANDARD", ++ "PREMIUM", ++ "STANDARD", ++ "STANDARD_OVERRIDES_FIXED_STANDARD" ++ ], ++ "enumDescriptions": [ ++ "Public internet quality with fixed bandwidth.", ++ "High quality, Google-grade network tier, support for all networking products.", ++ "Public internet quality, only limited support for other networking products.", ++ "(Output only) Temporary tier for FIXED_STANDARD when fixed standard tier is expired or not configured." ++ ], ++ "type": "string" ++ }, + "drainNatIps": { + "description": "A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only.", + "items": { +@@ -63491,7 +66589,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -64804,6 +67902,13 @@ + "" + ], + "type": "string" ++ }, ++ "userDefinedFields": { ++ "description": "Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. 
Example: userDefinedFields: - name: \"ipv4_fragment_offset\" base: IPV4 offset: 6 size: 2 mask: \"0x1fff\"", ++ "items": { ++ "$ref": "SecurityPolicyUserDefinedField" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -64846,15 +67951,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -64896,6 +68001,13 @@ + "" + ], + "type": "string" ++ }, ++ "userIpRequestHeaders": { ++ "description": "An optional list of case-insensitive request header names to use for resolving the callers client IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -65073,7 +68185,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -65093,7 +68205,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. 
", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -65118,7 +68230,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -65129,6 +68241,10 @@ + "$ref": "SecurityPolicyRuleMatcher", + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." + }, ++ "networkMatch": { ++ "$ref": "SecurityPolicyRuleNetworkMatcher", ++ "description": "A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. Example: networkMatch: srcIpRanges: - \"192.0.2.0/24\" - \"198.51.100.0/24\" userDefinedFields: - name: \"ipv4_fragment_offset\" values: - \"1-0x1fff\" The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named \"ipv4_fragment_offset\" with a value between 1 and 0x1fff inclusive." ++ }, + "preconfiguredWafConfig": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfig", + "description": "Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect." 
+@@ -65148,7 +68264,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "ruleNumber": { + "description": "Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server.", +@@ -65214,7 +68330,11 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." ++ }, ++ "exprOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptions", ++ "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -65275,6 +68395,117 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleMatcherExprOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptions", ++ "properties": { ++ "recaptchaOptions": { ++ "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { ++ "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", ++ "properties": { ++ "actionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "sessionTokenSiteKeys": { ++ "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleNetworkMatcher": { ++ "description": "Represents a match condition that incoming network traffic is evaluated against.", ++ "id": "SecurityPolicyRuleNetworkMatcher", ++ "properties": { ++ "destIpRanges": { ++ "description": "Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destPorts": { ++ "description": "Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "ipProtocols": { ++ "description": "IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. \"6\"), range (e.g. \"253-254\"), or one of the following protocol names: \"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"ipip\", or \"sctp\".", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcAsns": { ++ "description": "BGP Autonomous System Number associated with the source IP address.", ++ "items": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "type": "array" ++ }, ++ "srcIpRanges": { ++ "description": "Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcPorts": { ++ "description": "Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcRegionCodes": { ++ "description": "Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "userDefinedFields": { ++ "description": "User-defined fields. Each element names a defined field and lists the matching values for that field.", ++ "items": { ++ "$ref": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch": { ++ "id": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch", ++ "properties": { ++ "name": { ++ "description": "Name of the user-defined field, as given in the definition.", ++ "type": "string" ++ }, ++ "values": { ++ "description": "Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with \"0x\") number (e.g. \"64\") or range (e.g. \"0x400-0x7ff\").", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRulePreconfiguredWafConfig": { + "id": "SecurityPolicyRulePreconfiguredWafConfig", + "properties": { +@@ -65415,12 +68646,12 @@ + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -65503,6 +68734,46 @@ + }, + "type": "object" + }, ++ "SecurityPolicyUserDefinedField": { ++ "id": "SecurityPolicyUserDefinedField", ++ "properties": { ++ "base": { ++ "description": "The base relative to which 'offset' is measured. Possible values are: - IPV4: Points to the beginning of the IPv4 header. - IPV6: Points to the beginning of the IPv6 header. - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. required", ++ "enum": [ ++ "IPV4", ++ "IPV6", ++ "TCP", ++ "UDP" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "mask": { ++ "description": "If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. Encoded as a hexadecimal number (starting with \"0x\"). The last byte of the field (in network byte order) corresponds to the least significant byte of the mask.", ++ "type": "string" ++ }, ++ "name": { ++ "description": "The name of this field. Must be unique within the policy.", ++ "type": "string" ++ }, ++ "offset": { ++ "description": "Offset of the first byte of the field (in network byte order) relative to 'base'.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "size": { ++ "description": "Size of the field in bytes. Valid values: 1-4.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "SecuritySettings": { + "description": "The authentication and authorization settings for a BackendService.", + "id": "SecuritySettings", +@@ -65516,11 +68787,11 @@ + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. 
Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "subjectAltNames": { +- "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -65597,7 +68868,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -65694,6 +68965,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . 
- If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -66404,6 +69679,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -66447,6 +69723,14 @@ + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", + "type": "string" + }, ++ "sourceInstantSnapshot": { ++ "description": "The source instant snapshot used to create this snapshot. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /instantSnapshots/instantSnapshot - projects/project/zones/zone/instantSnapshots/instantSnapshot - zones/zone/instantSnapshots/instantSnapshot ", ++ "type": "string" ++ }, ++ "sourceInstantSnapshotId": { ++ "description": "[Output Only] The unique ID of the instant snapshot used to create this snapshot. This value identifies the exact instant snapshot that was used to create this persistent disk. For example, if you created the persistent disk from an instant snapshot that was later deleted and recreated under the same name, the source instant snapshot ID would identify the exact instant snapshot that was used.", ++ "type": "string" ++ }, + "sourceSnapshotSchedulePolicy": { + "description": "[Output Only] URL of the resource policy which created this scheduled snapshot.", + "type": "string" +@@ -67901,7 +71185,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "externalIpv6Prefix": { +@@ -67983,7 +71267,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. 
This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -68009,7 +71293,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -68330,7 +71614,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -68899,6 +72183,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64", +@@ -69286,7 +72575,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -69342,7 +72631,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -69365,6 +72654,11 @@ + }, + "type": "array" + }, ++ "httpKeepAliveTimeoutSec": { ++ "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", ++ "format": "int32", ++ "type": "integer" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -69407,7 +72701,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -69720,6 +73014,10 @@ + "description": "The URL of the network this target instance uses to forward traffic. 
If not specified, the traffic will be forwarded to the network that the default network interface belongs to.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this target instance.", ++ "type": "string" ++ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" +@@ -70134,6 +73432,10 @@ + "description": "[Output Only] URL of the region where the target pool resides.", + "type": "string" + }, ++ "securityPolicy": { ++ "description": "[Output Only] The resource URL for the security policy associated with this target pool.", ++ "type": "string" ++ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" +@@ -70606,7 +73908,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -70648,7 +73950,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -72404,7 +75706,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -72422,7 +75724,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. 
Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -72839,6 +76141,18 @@ + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, ++ "gatewayIpVersion": { ++ "description": "The IP family of the gateway IPs for the HA-VPN gateway interfaces. If not specified, IPV4 will be used.", ++ "enum": [ ++ "IPV4", ++ "IPV6" ++ ], ++ "enumDescriptions": [ ++ "Every HA-VPN gateway interface is configured with an IPv4 address.", ++ "Every HA-VPN gateway interface is configured with an IPv6 address." ++ ], ++ "type": "string" ++ }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", +@@ -73209,7 +76523,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -73221,7 +76535,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +@@ -73262,6 +76576,10 @@ + "ipAddress": { + "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", + "type": "string" ++ }, ++ "ipv6Address": { ++ "description": "[Output Only] IPv6 address for this VPN interface associated with the VPN gateway. The IPv6 address must be a regional external IPv6 address. The format is RFC 5952 format (e.g. 
2001:db8::2d9:51:0:0).", ++ "type": "string" + } + }, + "type": "object" +diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +index 0737664ae8d..e614cb37286 100644 +--- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:beta" + const apiName = "compute" +@@ -170,8 +171,10 @@ func New(client *http.Client) (*Service, error) { + s.InstanceGroups = NewInstanceGroupsService(s) + s.InstanceTemplates = NewInstanceTemplatesService(s) + s.Instances = NewInstancesService(s) ++ s.InstantSnapshots = NewInstantSnapshotsService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) ++ s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) + s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) + s.Licenses = NewLicensesService(s) +@@ -201,6 +204,7 @@ func New(client *http.Client) (*Service, error) { + s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionInstanceTemplates = NewRegionInstanceTemplatesService(s) + s.RegionInstances = NewRegionInstancesService(s) ++ s.RegionInstantSnapshots = NewRegionInstantSnapshotsService(s) + s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) + s.RegionNetworkFirewallPolicies = NewRegionNetworkFirewallPoliciesService(s) + s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) +@@ -296,10 +300,14 @@ type Service struct { + + Instances *InstancesService + ++ InstantSnapshots *InstantSnapshotsService ++ + InterconnectAttachments *InterconnectAttachmentsService + + InterconnectLocations *InterconnectLocationsService + ++ InterconnectRemoteLocations *InterconnectRemoteLocationsService ++ + Interconnects *InterconnectsService + + LicenseCodes *LicenseCodesService +@@ -358,6 +366,8 @@ type Service struct { + + RegionInstances *RegionInstancesService + ++ RegionInstantSnapshots *RegionInstantSnapshotsService ++ + RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService + + RegionNetworkFirewallPolicies *RegionNetworkFirewallPoliciesService +@@ -670,6 +680,15 @@ type InstancesService struct { + s *Service + } + ++func NewInstantSnapshotsService(s *Service) *InstantSnapshotsService { ++ rs := &InstantSnapshotsService{s: s} ++ return rs ++} ++ ++type InstantSnapshotsService struct { ++ s *Service ++} ++ + func NewInterconnectAttachmentsService(s *Service) *InterconnectAttachmentsService { + rs := &InterconnectAttachmentsService{s: s} + return rs +@@ -688,6 +707,15 @@ type InterconnectLocationsService struct { + s *Service + } + ++func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { ++ rs := &InterconnectRemoteLocationsService{s: s} ++ return rs ++} ++ ++type InterconnectRemoteLocationsService struct { ++ s *Service ++} ++ + func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +@@ -949,6 +977,15 @@ type RegionInstancesService struct { + s *Service + } + ++func NewRegionInstantSnapshotsService(s *Service) *RegionInstantSnapshotsService { ++ rs := &RegionInstantSnapshotsService{s: s} ++ return rs ++} ++ ++type RegionInstantSnapshotsService struct { 
++ s *Service ++} ++ + func NewRegionNetworkEndpointGroupsService(s *Service) *RegionNetworkEndpointGroupsService { + rs := &RegionNetworkEndpointGroupsService{s: s} + return rs +@@ -1978,32 +2015,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. ++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. ++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -2032,6 +2072,10 @@ type AccessConfig struct { + // external IPv6 range. + PublicPtrDomainName string `json:"publicPtrDomainName,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this access config. 
++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SetPublicPtr: Specifies whether a public DNS 'PTR' record should be + // created to map the external IP address of the instance to a DNS + // domain name. This field is not used in ipv6AccessConfig. A default +@@ -2039,12 +2083,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -3242,6 +3287,17 @@ type AttachedDisk struct { + // read-write mode. + Mode string `json:"mode,omitempty"` + ++ // SavedState: For LocalSSD disks on VM Instances in STOPPED or ++ // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data ++ // has been saved to a persistent location by customer request. (see the ++ // discard_local_ssd option on Stop/Suspend). Read-only in the api. ++ // ++ // Possible values: ++ // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not ++ // been preserved. ++ // "PRESERVED" - Disk state has been preserved. ++ SavedState string `json:"savedState,omitempty"` ++ + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` +@@ -3387,6 +3443,13 @@ type AttachedDiskInitializeParams struct { + // disk can handle. Values must be between 1 and 7,124. + ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + ++ // ReplicaZones: Required for each regional disk associated with the ++ // instance. Specify the URLs of the zones where the disk should be ++ // replicated to. You must provide exactly two replica zones, and one ++ // zone must be the same as the instance zone. You can't use this option ++ // with boot disks. ++ ReplicaZones []string `json:"replicaZones,omitempty"` ++ + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values +@@ -4386,15 +4449,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). 
++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4422,7 +4487,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -5760,6 +5830,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -7509,6 +7583,44 @@ func (s *Binding) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// BulkInsertDiskResource: A transient resource used in ++// compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is ++// only used to process requests and is not persisted. ++type BulkInsertDiskResource struct { ++ // SourceConsistencyGroupPolicy: The URL of the ++ // DiskConsistencyGroupPolicy for the group of disks to clone. This may ++ // be a full or partial URL, such as: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /resourcePolicies/resourcePolicy - ++ // projects/project/regions/region/resourcePolicies/resourcePolicy - ++ // regions/region/resourcePolicies/resourcePolicy ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "SourceConsistencyGroupPolicy") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. ++ // "SourceConsistencyGroupPolicy") to include in API requests with the ++ // JSON null value. 
By default, fields with empty values are omitted ++ // from API requests. However, any field with an empty value appearing ++ // in NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BulkInsertDiskResource) MarshalJSON() ([]byte, error) { ++ type NoMethod BulkInsertDiskResource ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // BulkInsertInstanceResource: A transient resource used in + // compute.instances.bulkInsert and compute.regionInstances.bulkInsert . + // This resource is not persisted anywhere, it is used only for +@@ -7589,11 +7701,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { + // properties to be set on individual instances. To be extended in the + // future. + type BulkInsertInstanceResourcePerInstanceProperties struct { ++ // Hostname: Specifies the hostname of the instance. More details in: ++ // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention ++ Hostname string `json:"hostname,omitempty"` ++ + // Name: This field is only temporary. It will be removed. Do not use + // it. + Name string `json:"name,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Name") to ++ // ForceSendFields is a list of field names (e.g. "Hostname") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -7601,8 +7717,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Name") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Hostname") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -7889,7 +8005,7 @@ type Commitment struct { + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -7930,6 +8046,7 @@ type Commitment struct { + // "GENERAL_PURPOSE_N2" + // "GENERAL_PURPOSE_N2D" + // "GENERAL_PURPOSE_T2D" ++ // "GRAPHICS_OPTIMIZED" + // "MEMORY_OPTIMIZED" + // "MEMORY_OPTIMIZED_M3" + // "TYPE_UNSPECIFIED" +@@ -9095,6 +9212,13 @@ type Disk struct { + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + ++ // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. ++ AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: [Output Only] A list of disks this disk is ++ // asynchronously replicated to. 
++ AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -9250,6 +9374,10 @@ type Disk struct { + // automatic snapshot creations. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for the disk ++ // resource. ++ ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` ++ + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + +@@ -9265,6 +9393,16 @@ type Disk struct { + // source. Acceptable values are 1 to 65536, inclusive. + SizeGb int64 `json:"sizeGb,omitempty,string"` + ++ // SourceConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // SourceConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` ++ + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: - +@@ -9311,6 +9449,24 @@ type Disk struct { + // version of the image that was used. + SourceImageId string `json:"sourceImageId,omitempty"` + ++ // SourceInstantSnapshot: The source instant snapshot used to create ++ // this disk. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /instantSnapshots/instantSnapshot - ++ // projects/project/zones/zone/instantSnapshots/instantSnapshot - ++ // zones/zone/instantSnapshots/instantSnapshot ++ SourceInstantSnapshot string `json:"sourceInstantSnapshot,omitempty"` ++ ++ // SourceInstantSnapshotId: [Output Only] The unique ID of the instant ++ // snapshot used to create this disk. This value identifies the exact ++ // instant snapshot that was used to create this persistent disk. For ++ // example, if you created the persistent disk from an instant snapshot ++ // that was later deleted and recreated under the same name, the source ++ // instant snapshot ID would identify the exact version of the instant ++ // snapshot that was used. ++ SourceInstantSnapshotId string `json:"sourceInstantSnapshotId,omitempty"` ++ + // SourceSnapshot: The source snapshot used to create this disk. You can + // provide this as a partial or full URL to the resource. For example, + // the following are valid values: - +@@ -9603,6 +9759,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. 
++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ ++ // Disk: The other disk asynchronously replicated to or from the current ++ // disk. You can provide this as a partial or full URL to the resource. ++ // For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // zones/zone/disks/disk ++ Disk string `json:"disk,omitempty"` ++ ++ // DiskId: [Output Only] The unique ID of the other disk asynchronously ++ // replicated to or from the current disk. This value identifies the ++ // exact disk that was used to create this replication. For example, if ++ // you started replicating the persistent disk from a disk that was ++ // later deleted and recreated under the same name, the disk ID would ++ // identify the exact version of the disk that was used. ++ DiskId string `json:"diskId,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplication ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskAsyncReplicationList struct { ++ AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AsyncReplicationDisk") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplicationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskInstantiationConfig: A specification of the desired way to + // instantiate a disk in the instance template when its created from a + // source instance. +@@ -9944,6 +10180,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskResourceStatus struct { ++ AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message ++ AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskResourceStatusAsyncReplicationStatus struct { ++ // Possible values: ++ // "ACTIVE" - Replication is active. ++ // "CREATED" - Secondary disk is created and is waiting for ++ // replication to start. ++ // "STARTING" - Replication is starting. ++ // "STATE_UNSPECIFIED" ++ // "STOPPED" - Replication is stopped. ++ // "STOPPING" - Replication is stopping. ++ State string `json:"state,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "State") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "State") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatusAsyncReplicationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskType: Represents a Disk Type resource. Google Compute Engine has + // two Disk Type resources: * Regional + // (/compute/docs/reference/rest/beta/regionDiskTypes) * Zonal +@@ -10828,6 +11128,79 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DisksStartAsyncReplicationRequest struct { ++ // AsyncSecondaryDisk: The secondary disk to start asynchronous ++ // replication to. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod DisksStartAsyncReplicationRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// DisksStopGroupAsyncReplicationResource: A transient resource used in ++// compute.disks.stopGroupAsyncReplication and ++// compute.regionDisks.stopGroupAsyncReplication. It is only used to ++// process requests and is not persisted. ++type DisksStopGroupAsyncReplicationResource struct { ++ // ResourcePolicy: The URL of the DiskConsistencyGroupPolicy for the ++ // group of disks to stop. This may be a full or partial URL, such as: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /resourcePolicies/resourcePolicy - ++ // projects/project/regions/region/resourcePolicies/resourcePolicy - ++ // regions/region/resourcePolicies/resourcePolicy ++ ResourcePolicy string `json:"resourcePolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ResourcePolicy") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ResourcePolicy") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DisksStopGroupAsyncReplicationResource) MarshalJSON() ([]byte, error) { ++ type NoMethod DisksStopGroupAsyncReplicationResource ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DisplayDevice: A set of Display Device options + type DisplayDevice struct { + // EnableDisplay: Defines whether the instance has Display enabled. +@@ -11469,6 +11842,15 @@ type ExternalVpnGatewayInterface struct { + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: IPv6 address of the interface in the external VPN ++ // gateway. This IPv6 address can be either from your on-premise gateway ++ // or another Cloud provider's VPN gateway, it cannot be an IP address ++ // from Google Compute Engine. Must specify an IPv6 address (not ++ // IPV4-mapped) using any format described in RFC 4291 (e.g. ++ // 2001:db8:0:0:2d9:51:0:0). The output format is RFC 5952 format (e.g. ++ // 2001:db8::2d9:51:0:0). ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -12634,6 +13016,13 @@ type FirewallPolicyRule struct { + // single firewall policy rule. + RuleTupleCount int64 `json:"ruleTupleCount,omitempty"` + ++ // SecurityProfileGroup: A fully-qualified URL of a SecurityProfile ++ // resource instance. Example: ++ // https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group ++ // Must be specified if action = 'apply_security_profile_group' and ++ // cannot be specified for other actions. ++ SecurityProfileGroup string `json:"securityProfileGroup,omitempty"` ++ + // TargetResources: A list of network resource URLs to which this rule + // applies. This field allows you to control which network's VMs get + // this rule. If this field is left blank, all VMs within the +@@ -12656,6 +13045,11 @@ type FirewallPolicyRule struct { + // of instances that are applied with this rule. + TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` + ++ // TlsInspect: Boolean flag indicating if the traffic should be TLS ++ // decrypted. Can be set only if action = 'apply_security_profile_group' ++ // and cannot be set for other actions. ++ TlsInspect bool `json:"tlsInspect,omitempty"` ++ + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +@@ -13077,9 +13471,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. 
For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -13937,6 +14332,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type GlobalAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project ++ // /global/addresses/address - projects/project/global/addresses/address ++ // Note that destination project must be different from the source ++ // project. So /global/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod GlobalAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +@@ -14226,8 +14658,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. 
+ // + // Possible values: + // "FEATURE_TYPE_UNSPECIFIED" +@@ -14235,6 +14667,7 @@ type GuestOsFeature struct { + // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "SEV_CAPABLE" ++ // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" +@@ -14552,12 +14985,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/beta/regionHealthChecks) Internal + // HTTP(S) load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -15655,7 +16088,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -15740,12 +16173,22 @@ type HealthStatusForNetworkEndpoint struct { + // the health checks configured. + // + // Possible values: +- // "DRAINING" +- // "HEALTHY" +- // "UNHEALTHY" +- // "UNKNOWN" ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. + HealthState string `json:"healthState,omitempty"` + ++ // Ipv6HealthState: Health state of the ipv6 network endpoint determined ++ // based on the health checks configured. ++ // ++ // Possible values: ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. ++ Ipv6HealthState string `json:"ipv6HealthState,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -18136,9 +18579,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -19447,13 +19890,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. 
+ HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to +@@ -20213,7 +20657,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -22902,9 +23348,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -23475,6 +23921,42 @@ func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InstancesSetSecurityPolicyRequest struct { ++ // NetworkInterfaces: The network interfaces that the security policy ++ // will be applied to. Network interfaces use the nicN naming format. ++ // You can only set a security policy for network interfaces with an ++ // access config. ++ NetworkInterfaces []string `json:"networkInterfaces,omitempty"` ++ ++ // SecurityPolicy: A full or partial URL to a security policy to add to ++ // this instance. If this field is set to an empty string it will remove ++ // the associated security policy. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NetworkInterfaces") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NetworkInterfaces") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstancesSetSecurityPolicyRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type InstancesSetServiceAccountRequest struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` +@@ -23537,6 +24019,801 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InstantSnapshot: Represents a InstantSnapshot resource. You can use ++// instant snapshots to create disk rollback points quickly.. ++type InstantSnapshot struct { ++ // Architecture: [Output Only] The architecture of the instant snapshot. ++ // Valid values are ARM64 or X86_64. ++ // ++ // Possible values: ++ // "ARCHITECTURE_UNSPECIFIED" - Default value indicating Architecture ++ // is not set. ++ // "ARM64" - Machines with architecture ARM64 ++ // "X86_64" - Machines with architecture X86_64 ++ Architecture string `json:"architecture,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: An optional description of this resource. Provide this ++ // property when you create the resource. ++ Description string `json:"description,omitempty"` ++ ++ // DiskSizeGb: [Output Only] Size of the source disk, specified in GB. ++ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#instantSnapshot for InstantSnapshot resources. ++ Kind string `json:"kind,omitempty"` ++ ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // InstantSnapshot, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve a InstantSnapshot. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels to apply to this InstantSnapshot. These can be later ++ // modified by the setLabels method. Label values may be empty. 
++ Labels map[string]string `json:"labels,omitempty"` ++ ++ // Name: Name of the resource; provided by the client when the resource ++ // is created. The name must be 1-63 characters long, and comply with ++ // RFC1035. Specifically, the name must be 1-63 characters long and ++ // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means ++ // the first character must be a lowercase letter, and all following ++ // characters must be a dash, lowercase letter, or digit, except the ++ // last character, which cannot be a dash. ++ Name string `json:"name,omitempty"` ++ ++ // Region: [Output Only] URL of the region where the instant snapshot ++ // resides. You must specify this field as part of the HTTP request URL. ++ // It is not settable as a field in the request body. ++ Region string `json:"region,omitempty"` ++ ++ // ResourceStatus: [Output Only] Status information for the instant ++ // snapshot resource. ++ ResourceStatus *InstantSnapshotResourceStatus `json:"resourceStatus,omitempty"` ++ ++ // SatisfiesPzs: [Output Only] Reserved for future use. ++ SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // SelfLinkWithId: [Output Only] Server-defined URL for this resource's ++ // resource id. ++ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` ++ ++ // SourceDisk: URL of the source disk used to create this instant ++ // snapshot. Note that the source disk must be in the same zone/region ++ // as the instant snapshot to be created. This can be a full or valid ++ // partial URL. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ SourceDisk string `json:"sourceDisk,omitempty"` ++ ++ // SourceDiskId: [Output Only] The ID value of the disk used to create ++ // this InstantSnapshot. This value may be used to determine whether the ++ // InstantSnapshot was taken from the current or a previous instance of ++ // a given disk name. ++ SourceDiskId string `json:"sourceDiskId,omitempty"` ++ ++ // Status: [Output Only] The status of the instantSnapshot. This can be ++ // CREATING, DELETING, FAILED, or READY. ++ // ++ // Possible values: ++ // "CREATING" - InstantSnapshot creation is in progress. ++ // "DELETING" - InstantSnapshot is currently being deleted. ++ // "FAILED" - InstantSnapshot creation failed. ++ // "READY" - InstantSnapshot has been created successfully. ++ Status string `json:"status,omitempty"` ++ ++ // Zone: [Output Only] URL of the zone where the instant snapshot ++ // resides. You must specify this field as part of the HTTP request URL. ++ // It is not settable as a field in the request body. ++ Zone string `json:"zone,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Architecture") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Architecture") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshot) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshot ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotAggregatedList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstantSnapshotsScopedList resources. ++ Items map[string]InstantSnapshotsScopedList `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#instantSnapshotAggregatedList for aggregated lists of ++ // instantSnapshots. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Unreachables: [Output Only] Unreachable resources. ++ Unreachables []string `json:"unreachables,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InstantSnapshotAggregatedListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotAggregatedListWarning: [Output Only] Informational ++// warning message. ++type InstantSnapshotAggregatedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. 
++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotAggregatedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotAggregatedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. 
++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotAggregatedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotAggregatedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotExportParams struct { ++ // BaseInstantSnapshot: An optional base instant snapshot that this ++ // resource is compared against. If not specified, all blocks of this ++ // resource are exported. The base instant snapshot and this resource ++ // must be created from the same disk. The base instant snapshot must be ++ // created earlier in time than this resource. ++ BaseInstantSnapshot string `json:"baseInstantSnapshot,omitempty"` ++ ++ // BucketName: The name of an existing bucket in Cloud Storage where the ++ // changed blocks will be stored. The Google Service Account must have ++ // read and write access to this bucket. The bucket has to be in the ++ // same region as this resource. ++ BucketName string `json:"bucketName,omitempty"` ++ ++ // EncryptionKey: Encryption key used to encrypt the instant snapshot. ++ EncryptionKey *CustomerEncryptionKey `json:"encryptionKey,omitempty"` ++ ++ // ObjectName: Name of the output Bigstore object storing the changed ++ // blocks. Object name must be less than 1024 bytes in length. ++ ObjectName string `json:"objectName,omitempty"` ++ ++ // OutputType: The format of the output file. ++ // ++ // Possible values: ++ // "INVALID" ++ // "METADATA_AND_DATA" ++ // "METADATA_ONLY" ++ OutputType string `json:"outputType,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BaseInstantSnapshot") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BaseInstantSnapshot") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotExportParams) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotExportParams ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotList: Contains a list of InstantSnapshot resources. ++type InstantSnapshotList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstantSnapshot resources. ++ Items []*InstantSnapshot `json:"items,omitempty"` ++ ++ // Kind: Type of resource. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. 
Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InstantSnapshotListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotListWarning: [Output Only] Informational warning ++// message. ++type InstantSnapshotListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. 
++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotResourceStatus struct { ++ // StorageSizeBytes: [Output Only] The storage size of this instant ++ // snapshot. ++ StorageSizeBytes int64 `json:"storageSizeBytes,omitempty,string"` ++ ++ // ForceSendFields is a list of field names (e.g. "StorageSizeBytes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "StorageSizeBytes") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsExportRequest struct { ++ // ExportParams: Parameters to export the changed blocks. ++ ExportParams *InstantSnapshotExportParams `json:"exportParams,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ExportParams") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ExportParams") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsExportRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsExportRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsScopedList struct { ++ // InstantSnapshots: [Output Only] A list of instantSnapshots contained ++ // in this scope. ++ InstantSnapshots []*InstantSnapshot `json:"instantSnapshots,omitempty"` ++ ++ // Warning: [Output Only] Informational warning which replaces the list ++ // of instantSnapshots when the list is empty. ++ Warning *InstantSnapshotsScopedListWarning `json:"warning,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "InstantSnapshots") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InstantSnapshots") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedList) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InstantSnapshotsScopedListWarning: [Output Only] Informational ++// warning which replaces the list of instantSnapshots when the list is ++// empty. ++type InstantSnapshotsScopedListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. 
++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InstantSnapshotsScopedListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. 
++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InstantSnapshotsScopedListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InstantSnapshotsScopedListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InstantSnapshotsScopedListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // Int64RangeMatch: HttpRouteRuleMatch criteria for field values that + // must stay within the specified integer range. + type Int64RangeMatch struct { +@@ -23572,9 +24849,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. 
+@@ -23714,6 +24991,11 @@ type Interconnect struct { + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + ++ // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. ++ // This field specifies the location outside of Google's network that ++ // the interconnect is connected to. ++ RemoteLocation string `json:"remoteLocation,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -23829,6 +25111,11 @@ type InterconnectAttachment struct { + // CloudRouterIpv6InterfaceId: This field is not available. + CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + ++ // ConfigurationConstraints: [Output Only] Constraints for this ++ // attachment, if any. The attachment does not work if these constraints ++ // are not met. ++ ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -23921,8 +25208,7 @@ type InterconnectAttachment struct { + // attachment. If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always +@@ -23999,6 +25285,14 @@ type InterconnectAttachment struct { + // body. + Region string `json:"region,omitempty"` + ++ // RemoteService: [Output Only] If the attachment is on a Cross-Cloud ++ // Interconnect connection, this field contains the interconnect's ++ // remote location service provider. Example values: "Amazon Web ++ // Services" "Microsoft Azure". The field is set only for attachments on ++ // Cross-Cloud Interconnect connections. Its value is copied from the ++ // InterconnectRemoteLocation remoteService field. ++ RemoteService string `json:"remoteService,omitempty"` ++ + // Router: URL of the Cloud Router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to +@@ -24062,6 +25356,16 @@ type InterconnectAttachment struct { + // yet, because turnup is not complete. + State string `json:"state,omitempty"` + ++ // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 ++ // (default) - 30 The default value is 29, except for Cross-Cloud ++ // Interconnect connections that use an InterconnectRemoteLocation with ++ // a constraints.subnetLengthRange.min equal to 30. For example, ++ // connections that use an Azure remote location fall into this ++ // category. In these cases, the default value is 30, and requesting 29 ++ // returns an error. Where both 29 and 30 are allowed, 29 is preferred, ++ // because it gives Google Cloud Support more debugging visibility. ++ SubnetLength int64 `json:"subnetLength,omitempty"` ++ + // Type: The type of interconnect attachment this is, which can take one + // of the following values: - DEDICATED: an attachment to a Dedicated + // Interconnect. 
- PARTNER: an attachment to a Partner Interconnect, +@@ -24300,6 +25604,87 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InterconnectAttachmentConfigurationConstraints struct { ++ // BgpMd5: [Output Only] Whether the attachment's BGP session ++ // requires/allows/disallows BGP MD5 authentication. This can take one ++ // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. ++ // For example, a Cross-Cloud Interconnect connection to a remote cloud ++ // provider that requires BGP MD5 authentication has the ++ // interconnectRemoteLocation ++ // attachment_configuration_constraints.bgp_md5 field set to ++ // MD5_REQUIRED, and that property is propagated to the attachment. ++ // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 ++ // is requested. ++ // ++ // Possible values: ++ // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported ++ // and can optionally be configured. ++ // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be ++ // configured. ++ // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must ++ // not be configured ++ BgpMd5 string `json:"bgpMd5,omitempty"` ++ ++ // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote ++ // location is known to support. Formatted as an array of inclusive ++ // ranges {min: min-value, max: max-value}. For example, [{min: 123, ++ // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or ++ // anything in the range 64512-65534. This field is only advisory. ++ // Although the API accepts other ranges, these are the ranges that we ++ // recommend. ++ BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BgpMd5") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BgpMd5") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectAttachmentList: Response to the list request, and + // contains a list of interconnect attachments. + type InterconnectAttachmentList struct { +@@ -25688,6 +27073,468 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect ++// Remote Location resource. You can use this resource to find remote ++// location details about an Interconnect attachment (VLAN). ++type InterconnectRemoteLocation struct { ++ // Address: [Output Only] The postal address of the Point of Presence, ++ // each line in the address is separated by a newline character. ++ Address string `json:"address,omitempty"` ++ ++ // AttachmentConfigurationConstraints: [Output Only] Subset of fields ++ // from InterconnectAttachment's |configurationConstraints| field that ++ // apply to all attachments for this remote location. ++ AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` ++ ++ // City: [Output Only] Metropolitan area designator that indicates which ++ // city an interconnect is located. For example: "Chicago, IL", ++ // "Amsterdam, Netherlands". ++ City string `json:"city,omitempty"` ++ ++ // Constraints: [Output Only] Constraints on the parameters for creating ++ // Cross-Cloud Interconnect and associated InterconnectAttachments. ++ Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` ++ ++ // Continent: [Output Only] Continent for this location, which can take ++ // one of the following values: - AFRICA - ASIA_PAC - EUROPE - ++ // NORTH_AMERICA - SOUTH_AMERICA ++ // ++ // Possible values: ++ // "AFRICA" ++ // "ASIA_PAC" ++ // "EUROPE" ++ // "NORTH_AMERICA" ++ // "SOUTH_AMERICA" ++ Continent string `json:"continent,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: [Output Only] An optional description of the resource. ++ Description string `json:"description,omitempty"` ++ ++ // FacilityProvider: [Output Only] The name of the provider for this ++ // facility (e.g., EQUINIX). ++ FacilityProvider string `json:"facilityProvider,omitempty"` ++ ++ // FacilityProviderFacilityId: [Output Only] A provider-assigned ++ // Identifier for this facility (e.g., Ashburn-DC1). 
++ FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#interconnectRemoteLocation for interconnect remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) ++ // constraints, which can take one of the following values: ++ // LACP_SUPPORTED, LACP_UNSUPPORTED ++ // ++ // Possible values: ++ // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled ++ // by default on the Cross-Cloud Interconnect. ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows ++ // bundleAggregationType as "static". GCP does not support LAGs without ++ // LACP, so requestedLinkCount must be 1. ++ Lacp string `json:"lacp,omitempty"` ++ ++ // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 100 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. ++ MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` ++ ++ // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 10 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. ++ MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` ++ ++ // Name: [Output Only] Name of the resource. ++ Name string `json:"name,omitempty"` ++ ++ // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this ++ // facility (corresponding with a netfac type in peeringdb). ++ PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` ++ ++ // PermittedConnections: [Output Only] Permitted connections. ++ PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` ++ ++ // RemoteService: [Output Only] Indicates the service provider present ++ // at the remote location. Example values: "Amazon Web Services", ++ // "Microsoft Azure". ++ RemoteService string `json:"remoteService,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Status: [Output Only] The status of this InterconnectRemoteLocation, ++ // which can take one of the following values: - CLOSED: The ++ // InterconnectRemoteLocation is closed and is unavailable for ++ // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The ++ // InterconnectRemoteLocation is available for provisioning new ++ // Cross-Cloud Interconnects. ++ // ++ // Possible values: ++ // "AVAILABLE" - The InterconnectRemoteLocation is available for ++ // provisioning new Cross-Cloud Interconnects. ++ // "CLOSED" - The InterconnectRemoteLocation is closed for ++ // provisioning new Cross-Cloud Interconnects. ++ Status string `json:"status,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Address") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Address") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraints struct { ++ // PortPairRemoteLocation: [Output Only] Port pair remote location ++ // constraints, which can take one of the following values: ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to ++ // individual ports, but the UI uses this field when ordering a pair of ++ // ports, to prevent users from accidentally ordering something that is ++ // incompatible with their cloud provider. Specifically, when ordering a ++ // redundant pair of Cross-Cloud Interconnect ports, and one of them ++ // uses a remote location with portPairMatchingRemoteLocation set to ++ // matching, the UI requires that both ports use the same remote ++ // location. ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider ++ // allocates ports in pairs, and the user should choose the same remote ++ // location for both ports. ++ // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision ++ // a redundant pair of Cross-Cloud Interconnects using two different ++ // remote locations in the same city. ++ PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` ++ ++ // PortPairVlan: [Output Only] Port pair VLAN constraints, which can ++ // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, ++ // PORT_PAIR_MATCHING_VLAN ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the ++ // Interconnect for this attachment is part of a pair of ports that ++ // should have matching VLAN allocations. This occurs with Cross-Cloud ++ // Interconnect to Azure remote locations. While GCP's API does not ++ // explicitly group pairs of ports, the UI uses this field to ensure ++ // matching VLAN ids when configuring a redundant VLAN pair. ++ // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means ++ // there is no constraint. ++ PortPairVlan string `json:"portPairVlan,omitempty"` ++ ++ // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum ++ // and maximum value (inclusive) for the IPv4 subnet length. For ++ // example, an interconnectRemoteLocation for Azure has {min: 30, max: ++ // 30} because Azure requires /30 subnets. This range specifies the ++ // values supported by both cloud providers. Interconnect currently ++ // supports /29 and /30 IPv4 subnet lengths. 
If a remote cloud has no ++ // constraint on IPv4 subnet length, the range would thus be {min: 29, ++ // max: 30}. ++ SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "PortPairRemoteLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PortPairRemoteLocation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationList: Response to the list request, and ++// contains a list of interconnect remote locations. ++type InterconnectRemoteLocationList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InterconnectRemoteLocation resources. ++ Items []*InterconnectRemoteLocation `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#interconnectRemoteLocationList for lists of interconnect ++ // remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. 
If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectsGetDiagnosticsResponse: Response for the + // InterconnectsGetDiagnosticsRequest. 
+ type InterconnectsGetDiagnosticsResponse struct { +@@ -26930,7 +28777,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -27589,9 +29436,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -28130,7 +29977,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -28242,10 +30089,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -28265,7 +30111,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -28516,7 +30366,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. + type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -28524,7 +30374,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. 
+ ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -29396,6 +31246,9 @@ type NetworkEndpoint struct { + // the network endpoint group belongs to will be used. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: Optional IPv6 address of network endpoint. ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // Port: Optional port number of network endpoint. If not specified, the + // defaultPort for the network endpoint group will be used. + Port int64 `json:"port,omitempty"` +@@ -30850,10 +32703,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -38186,6 +40040,7 @@ type Quota struct { + // "COMMITTED_NVIDIA_A100_80GB_GPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" ++ // "COMMITTED_NVIDIA_L4_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" +@@ -38237,11 +40092,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -38256,6 +40115,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -38280,6 +40140,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -38519,6 +40380,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. 
For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /addresses/address - ++ // projects/project/regions/region/addresses/address Note that ++ // destination project must be different from the source project. So ++ // /regions/region/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionAutoscalerList: Contains a list of autoscalers. + type RegionAutoscalerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -39009,238 +40908,52 @@ func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +-type RegionInstanceGroupList struct { +- // Id: [Output Only] Unique identifier for the resource; defined by the +- // server. +- Id string `json:"id,omitempty"` +- +- // Items: A list of InstanceGroup resources. +- Items []*InstanceGroup `json:"items,omitempty"` +- +- // Kind: The resource type. +- Kind string `json:"kind,omitempty"` +- +- // NextPageToken: [Output Only] This token allows you to get the next +- // page of results for list requests. If the number of results is larger +- // than maxResults, use the nextPageToken as a value for the query +- // parameter pageToken in the next list request. Subsequent list +- // requests will have their own nextPageToken to continue paging through +- // the results. +- NextPageToken string `json:"nextPageToken,omitempty"` +- +- // SelfLink: [Output Only] Server-defined URL for this resource. +- SelfLink string `json:"selfLink,omitempty"` +- +- // Warning: [Output Only] Informational warning message. +- Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` +- +- // ServerResponse contains the HTTP response code and headers from the +- // server. +- googleapi.ServerResponse `json:"-"` +- +- // ForceSendFields is a list of field names (e.g. "Id") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. 
+- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Id") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupList +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// RegionInstanceGroupListWarning: [Output Only] Informational warning +-// message. +-type RegionInstanceGroupListWarning struct { +- // Code: [Output Only] A warning code, if applicable. For example, +- // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in +- // the response. +- // +- // Possible values: +- // "CLEANUP_FAILED" - Warning about failed cleanup of transient +- // changes made by a failed operation. +- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was +- // created. +- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as deprecated +- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk +- // that is larger than image size. +- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the +- // resources has a type marked as experimental +- // "EXTERNAL_API_WARNING" - Warning that is present in an external api +- // call +- // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been +- // overridden. Deprecated unused field. +- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an +- // injected kernel, which is deprecated. +- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV +- // backend service is associated with a health check that is not of type +- // HTTP/HTTPS/HTTP2. +- // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a +- // exceedingly large number of resources +- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type +- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is +- // not assigned to an instance on the network. +- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot +- // ip forward. +- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's +- // nextHopInstance URL refers to an instance that does not have an ipv6 +- // interface on the same network as the route. +- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL +- // refers to an instance that does not exist. +- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance +- // URL refers to an instance that is not on the same network as the +- // route. +- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not +- // have a status of RUNNING. +- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to +- // continue the process despite the mentioned error. +- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list +- // page. +- // "PARTIAL_SUCCESS" - Success is reported, but some results may be +- // missing due to errors +- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource +- // that requires a TOS they have not accepted. 
+- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a +- // resource is in use. +- // "RESOURCE_NOT_DELETED" - One or more of the resources set to +- // auto-delete could not be deleted because they were in use. +- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is +- // ignored. +- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in +- // instance group manager is valid as such, but its application does not +- // make a lot of sense, because it allows only single instance in +- // instance group. +- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema +- // are present +- // "UNREACHABLE" - A given scope cannot be reached. +- Code string `json:"code,omitempty"` +- +- // Data: [Output Only] Metadata about this warning in key: value format. +- // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" +- // } +- Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` +- +- // Message: [Output Only] A human-readable description of the warning +- // code. +- Message string `json:"message,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Code") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. "Code") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupListWarning +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-type RegionInstanceGroupListWarningData struct { +- // Key: [Output Only] A key that provides more detail on the warning +- // being returned. For example, for warnings where there are no results +- // in a list request for a particular zone, this key might be scope and +- // the key value might be the zone name. Other examples might be a key +- // indicating a deprecated resource and a suggested replacement, or a +- // warning about invalid network settings (for example, if an instance +- // attempts to perform IP forwarding but is not enabled for IP +- // forwarding). +- Key string `json:"key,omitempty"` +- +- // Value: [Output Only] A warning data value corresponding to the key. +- Value string `json:"value,omitempty"` +- +- // ForceSendFields is a list of field names (e.g. "Key") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. +- ForceSendFields []string `json:"-"` +- +- // NullFields is a list of field names (e.g. 
"Key") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. +- NullFields []string `json:"-"` +-} +- +-func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupListWarningData +- raw := NoMethod(*s) +- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +-} +- +-// RegionInstanceGroupManagerDeleteInstanceConfigReq: +-// RegionInstanceGroupManagers.deletePerInstanceConfigs +-type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { +- // Names: The list of instance names for which we want to delete +- // per-instance configs on this managed instance group. +- Names []string `json:"names,omitempty"` ++type RegionDisksStartAsyncReplicationRequest struct { ++ // AsyncSecondaryDisk: The secondary disk to start asynchronous ++ // replication to. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // projects/project/regions/region/disks/disk - zones/zone/disks/disk - ++ // regions/region/disks/disk ++ AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Names") to +- // unconditionally include in API requests. By default, fields with ++ // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") ++ // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Names") to include in API +- // requests with the JSON null value. By default, fields with empty +- // values are omitted from API requests. However, any field with an +- // empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
+ NullFields []string `json:"-"` + } + +-func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { +- type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq ++func (s *RegionDisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionDisksStartAsyncReplicationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// RegionInstanceGroupManagerList: Contains a list of managed instance +-// groups. +-type RegionInstanceGroupManagerList struct { ++// RegionInstanceGroupList: Contains a list of InstanceGroup resources. ++type RegionInstanceGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + +- // Items: A list of InstanceGroupManager resources. +- Items []*InstanceGroupManager `json:"items,omitempty"` ++ // Items: A list of InstanceGroup resources. ++ Items []*InstanceGroup `json:"items,omitempty"` + +- // Kind: [Output Only] The resource type, which is always +- // compute#instanceGroupManagerList for a list of managed instance +- // groups that exist in th regional scope. ++ // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next +@@ -39255,7 +40968,229 @@ type RegionInstanceGroupManagerList struct { + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. +- Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` ++ Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupListWarning: [Output Only] Informational warning ++// message. ++type RegionInstanceGroupListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. 
++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. ++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type RegionInstanceGroupListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupManagerDeleteInstanceConfigReq: ++// RegionInstanceGroupManagers.deletePerInstanceConfigs ++type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { ++ // Names: The list of instance names for which we want to delete ++ // per-instance configs on this managed instance group. ++ Names []string `json:"names,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Names") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Names") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// RegionInstanceGroupManagerList: Contains a list of managed instance ++// groups. ++type RegionInstanceGroupManagerList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InstanceGroupManager resources. ++ Items []*InstanceGroupManager `json:"items,omitempty"` ++ ++ // Kind: [Output Only] The resource type, which is always ++ // compute#instanceGroupManagerList for a list of managed instance ++ // groups that exist in th regional scope. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token allows you to get the next ++ // page of results for list requests. If the number of results is larger ++ // than maxResults, use the nextPageToken as a value for the query ++ // parameter pageToken in the next list request. Subsequent list ++ // requests will have their own nextPageToken to continue paging through ++ // the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. +@@ -40337,6 +42272,33 @@ func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionInstantSnapshotsExportRequest struct { ++ // ExportParams: Parameters to export the changed blocks. ++ ExportParams *InstantSnapshotExportParams `json:"exportParams,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ExportParams") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ExportParams") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. 
It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionInstantSnapshotsExportRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionInstantSnapshotsExportRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionList: Contains a list of region resources. + type RegionList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -41767,6 +43729,10 @@ type ResourcePolicy struct { + + Description string `json:"description,omitempty"` + ++ // DiskConsistencyGroupPolicy: Resource policy for disk consistency ++ // groups. ++ DiskConsistencyGroupPolicy *ResourcePolicyDiskConsistencyGroupPolicy `json:"diskConsistencyGroupPolicy,omitempty"` ++ + // GroupPlacementPolicy: Resource policy for instances for placement + // configuration. + GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` +@@ -42078,6 +44044,11 @@ func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// ResourcePolicyDiskConsistencyGroupPolicy: Resource policy for disk ++// consistency groups. ++type ResourcePolicyDiskConsistencyGroupPolicy struct { ++} ++ + // ResourcePolicyGroupPlacementPolicy: A GroupPlacementPolicy specifies + // resource placement configuration. It specifies the failure bucket + // separation as well as network locality +@@ -43775,6 +45746,17 @@ type RouterBgpPeer struct { + // Bfd: BFD configuration for the BGP peering. + Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` + ++ // CustomLearnedIpRanges: A list of user-defined custom learned route IP ++ // address ranges for a BGP session. ++ CustomLearnedIpRanges []*RouterBgpPeerCustomLearnedIpRange `json:"customLearnedIpRanges,omitempty"` ++ ++ // CustomLearnedRoutePriority: The user-defined custom learned route ++ // priority for a BGP session. This value is applied to all custom ++ // learned route ranges for the session. You can choose a value from `0` ++ // to `65335`. If you don't provide a value, Google Cloud assigns a ++ // priority of `100` to the ranges. ++ CustomLearnedRoutePriority int64 `json:"customLearnedRoutePriority,omitempty"` ++ + // Enable: The status of the BGP peer connection. If set to FALSE, any + // active session with the peer is terminated and all associated routing + // information is removed. If set to TRUE, the peer connection can be +@@ -43935,6 +45917,36 @@ func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RouterBgpPeerCustomLearnedIpRange struct { ++ // Range: The custom learned route IP address range. Must be a valid ++ // CIDR-formatted prefix. If an IP address is provided without a subnet ++ // mask, it is interpreted as, for IPv4, a `/32` singular IP address ++ // range, and, for IPv6, `/128`. ++ Range string `json:"range,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Range") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. 
++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Range") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RouterBgpPeerCustomLearnedIpRange) MarshalJSON() ([]byte, error) { ++ type NoMethod RouterBgpPeerCustomLearnedIpRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP address space. The value must be a +@@ -44264,6 +46276,21 @@ func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { + // that would be used for NAT. GCP would auto-allocate ephemeral IPs if + // no external IPs are provided. + type RouterNat struct { ++ // AutoNetworkTier: The network tier to use when automatically reserving ++ // IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, ++ // PREMIUM tier will be used. ++ // ++ // Possible values: ++ // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. ++ // "PREMIUM" - High quality, Google-grade network tier, support for ++ // all networking products. ++ // "STANDARD" - Public internet quality, only limited support for ++ // other networking products. ++ // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier ++ // for FIXED_STANDARD when fixed standard tier is expired or not ++ // configured. ++ AutoNetworkTier string `json:"autoNetworkTier,omitempty"` ++ + // DrainNatIps: A list of URLs of the IP resources to be drained. These + // IPs must be valid static external IPs that have been assigned to the + // NAT. These IPs should be used for updating/patching a NAT only. +@@ -44347,10 +46374,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -44382,7 +46408,7 @@ type RouterNat struct { + // to 30s if not set. + UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DrainNatIps") to ++ // ForceSendFields is a list of field names (e.g. "AutoNetworkTier") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -44390,12 +46416,13 @@ type RouterNat struct { + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DrainNatIps") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "AutoNetworkTier") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. + NullFields []string `json:"-"` + } + +@@ -46152,6 +48179,15 @@ type SecurityPolicy struct { + // "FIREWALL" + Type string `json:"type,omitempty"` + ++ // UserDefinedFields: Definitions of user-defined fields for ++ // CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to ++ // 4 bytes extracted from a fixed offset in the packet, relative to the ++ // IPv4, IPv6, TCP, or UDP header, with an optional mask to select ++ // certain bits. Rules may then specify matching values for these ++ // fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" ++ // base: IPV4 offset: 6 size: 2 mask: "0x1fff" ++ UserDefinedFields []*SecurityPolicyUserDefinedField `json:"userDefinedFields,omitempty"` ++ + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +@@ -46268,13 +48304,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig) UnmarshalJSON(d + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -46319,6 +48359,10 @@ type SecurityPolicyAdvancedOptionsConfig struct { + // "VERBOSE" + LogLevel string `json:"logLevel,omitempty"` + ++ // UserIpRequestHeaders: An optional list of case-insensitive request ++ // header names to use for resolving the callers client IP address. ++ UserIpRequestHeaders []string `json:"userIpRequestHeaders,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -46640,7 +48684,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. 
The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to +@@ -46708,10 +48753,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -46735,7 +48781,8 @@ type SecurityPolicyRule struct { + EnableLogging bool `json:"enableLogging,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -46746,6 +48793,31 @@ type SecurityPolicyRule struct { + // If it evaluates to true, the corresponding 'action' is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + ++ // NetworkMatch: A match condition that incoming packets are evaluated ++ // against for CLOUD_ARMOR_NETWORK security policies. If it matches, the ++ // corresponding 'action' is enforced. The match criteria for a rule ++ // consists of built-in match fields (like 'srcIpRanges') and ++ // potentially multiple user-defined match fields ('userDefinedFields'). ++ // Field values may be extracted directly from the packet or derived ++ // from it (e.g. 'srcRegionCodes'). Some fields may not be present in ++ // every packet (e.g. 'srcPorts'). A user-defined field is only present ++ // if the base header is found in the packet and the entire field is in ++ // bounds. Each match field may specify which values can match it, ++ // listing one or more ranges, prefixes, or exact values that are ++ // considered a match for the field. A field value must be present in ++ // order to match a specified match field. If no match values are ++ // specified for a match field, then any field value is considered to ++ // match it, and it's not required to be present. For strings specifying ++ // '*' is also equivalent to match all. For a packet to match a rule, ++ // all specified match fields must match the corresponding field values ++ // derived from the packet. 
Example: networkMatch: srcIpRanges: - ++ // "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: ++ // "ipv4_fragment_offset" values: - "1-0x1fff" The above match condition ++ // matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 ++ // and a user-defined field named "ipv4_fragment_offset" with a value ++ // between 1 and 0x1fff inclusive. ++ NetworkMatch *SecurityPolicyRuleNetworkMatcher `json:"networkMatch,omitempty"` ++ + // PreconfiguredWafConfig: Preconfigured WAF configuration to be applied + // for the rule. If the rule does not evaluate preconfigured WAF rules, + // i.e., if evaluatePreconfiguredWaf() is not used, this field will have +@@ -46766,7 +48838,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // RuleNumber: Identifier for the rule. This is only unique within the +@@ -46886,9 +48959,19 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + ++ // ExprOptions: The configuration options available when specifying a ++ // user defined CEVAL expression (i.e., 'expr'). ++ ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` ++ + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must +@@ -46998,6 +49081,166 @@ func (s *SecurityPolicyRuleMatcherConfigLayer4Config) MarshalJSON() ([]byte, err + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleMatcherExprOptions struct { ++ // RecaptchaOptions: reCAPTCHA configuration options to be applied for ++ // the rule. If the rule does not evaluate reCAPTCHA tokens, this field ++ // will have no effect. ++ RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "RecaptchaOptions") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. 
However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { ++ // ActionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA action-tokens. The provided site keys need to ++ // be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` ++ ++ // SessionTokenSiteKeys: A list of site keys to be used during the ++ // validation of reCAPTCHA session-tokens. The provided site keys need ++ // to be created from reCAPTCHA API under the same project where the ++ // security policy is created. ++ SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") ++ // to unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// SecurityPolicyRuleNetworkMatcher: Represents a match condition that ++// incoming network traffic is evaluated against. ++type SecurityPolicyRuleNetworkMatcher struct { ++ // DestIpRanges: Destination IPv4/IPv6 addresses or CIDR prefixes, in ++ // standard text format. ++ DestIpRanges []string `json:"destIpRanges,omitempty"` ++ ++ // DestPorts: Destination port numbers for TCP/UDP/SCTP. Each element ++ // can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. ++ // "0-1023"). ++ DestPorts []string `json:"destPorts,omitempty"` ++ ++ // IpProtocols: IPv4 protocol / IPv6 next header (after extension ++ // headers). Each element can be an 8-bit unsigned decimal number (e.g. ++ // "6"), range (e.g. "253-254"), or one of the following protocol names: ++ // "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". ++ IpProtocols []string `json:"ipProtocols,omitempty"` ++ ++ // SrcAsns: BGP Autonomous System Number associated with the source IP ++ // address. 
++ SrcAsns []int64 `json:"srcAsns,omitempty"` ++ ++ // SrcIpRanges: Source IPv4/IPv6 addresses or CIDR prefixes, in standard ++ // text format. ++ SrcIpRanges []string `json:"srcIpRanges,omitempty"` ++ ++ // SrcPorts: Source port numbers for TCP/UDP/SCTP. Each element can be a ++ // 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). ++ SrcPorts []string `json:"srcPorts,omitempty"` ++ ++ // SrcRegionCodes: Two-letter ISO 3166-1 alpha-2 country code associated ++ // with the source IP address. ++ SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` ++ ++ // UserDefinedFields: User-defined fields. Each element names a defined ++ // field and lists the matching values for that field. ++ UserDefinedFields []*SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch `json:"userDefinedFields,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "DestIpRanges") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "DestIpRanges") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleNetworkMatcher) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleNetworkMatcher ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch struct { ++ // Name: Name of the user-defined field, as given in the definition. ++ Name string `json:"name,omitempty"` ++ ++ // Values: Matching values of the field. Each element can be a 32-bit ++ // unsigned decimal or hexadecimal (starting with "0x") number (e.g. ++ // "64") or range (e.g. "0x400-0x7ff"). ++ Values []string `json:"values,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Name") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Name") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type SecurityPolicyRulePreconfiguredWafConfig struct { + // Exclusions: A list of exclusions to apply during preconfigured WAF + // evaluation. +@@ -47192,12 +49435,14 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -47358,6 +49603,62 @@ func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyUserDefinedField struct { ++ // Base: The base relative to which 'offset' is measured. Possible ++ // values are: - IPV4: Points to the beginning of the IPv4 header. - ++ // IPV6: Points to the beginning of the IPv6 header. - TCP: Points to ++ // the beginning of the TCP header, skipping over any IPv4 options or ++ // IPv6 extension headers. Not present for non-first fragments. - UDP: ++ // Points to the beginning of the UDP header, skipping over any IPv4 ++ // options or IPv6 extension headers. Not present for non-first ++ // fragments. required ++ // ++ // Possible values: ++ // "IPV4" ++ // "IPV6" ++ // "TCP" ++ // "UDP" ++ Base string `json:"base,omitempty"` ++ ++ // Mask: If specified, apply this mask (bitwise AND) to the field to ++ // ignore bits before matching. Encoded as a hexadecimal number ++ // (starting with "0x"). The last byte of the field (in network byte ++ // order) corresponds to the least significant byte of the mask. ++ Mask string `json:"mask,omitempty"` ++ ++ // Name: The name of this field. Must be unique within the policy. ++ Name string `json:"name,omitempty"` ++ ++ // Offset: Offset of the first byte of the field (in network byte order) ++ // relative to 'base'. ++ Offset int64 `json:"offset,omitempty"` ++ ++ // Size: Size of the field in bytes. Valid values: 1-4. ++ Size int64 `json:"size,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Base") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. 
"Base") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyUserDefinedField) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyUserDefinedField ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // SecuritySettings: The authentication and authorization settings for a + // BackendService. + type SecuritySettings struct { +@@ -47375,7 +49676,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) +@@ -47390,8 +49691,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Authentication") to +@@ -47540,7 +49840,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -47625,6 +49925,18 @@ type ServiceAttachment struct { + // the PSC service attachment. + PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. 
It is not +@@ -48830,6 +51142,24 @@ type Snapshot struct { + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + ++ // SourceInstantSnapshot: The source instant snapshot used to create ++ // this snapshot. You can provide this as a partial or full URL to the ++ // resource. For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /instantSnapshots/instantSnapshot - ++ // projects/project/zones/zone/instantSnapshots/instantSnapshot - ++ // zones/zone/instantSnapshots/instantSnapshot ++ SourceInstantSnapshot string `json:"sourceInstantSnapshot,omitempty"` ++ ++ // SourceInstantSnapshotId: [Output Only] The unique ID of the instant ++ // snapshot used to create this snapshot. This value identifies the ++ // exact instant snapshot that was used to create this persistent disk. ++ // For example, if you created the persistent disk from an instant ++ // snapshot that was later deleted and recreated under the same name, ++ // the source instant snapshot ID would identify the exact instant ++ // snapshot that was used. ++ SourceInstantSnapshotId string `json:"sourceInstantSnapshotId,omitempty"` ++ + // SourceSnapshotSchedulePolicy: [Output Only] URL of the resource + // policy which created this scheduled snapshot. + SourceSnapshotSchedulePolicy string `json:"sourceSnapshotSchedulePolicy,omitempty"` +@@ -51063,8 +53393,8 @@ type Subnetwork struct { + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is determined by the org + // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // ExternalIpv6Prefix: The external IPv6 address range that is owned by +@@ -51157,12 +53487,20 @@ type Subnetwork struct { + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. 
+ // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -51184,9 +53522,9 @@ type Subnetwork struct { + ReservedInternalRange string `json:"reservedInternalRange,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -51662,6 +54000,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -52674,6 +55014,15 @@ type TargetHttpProxy struct { + // ForwardingRule for more details. + HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -53296,7 +55645,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -53419,7 +55770,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -53454,6 +55807,15 @@ type TargetHttpsProxy struct { + // ForwardingRule for more details. 
+ HttpFilters []string `json:"httpFilters,omitempty"` + ++ // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection ++ // open, after completing a response, while there is no matching traffic ++ // (in seconds). If an HTTP keep-alive is not specified, a default value ++ // (610 seconds) will be used. For Global external HTTP(S) load ++ // balancer, the minimum allowed value is 5 seconds and the maximum ++ // allowed value is 1200 seconds. For Global external HTTP(S) load ++ // balancer (classic), this option is not available publicly. ++ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` +@@ -53512,9 +55874,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -54001,6 +56365,10 @@ type TargetInstance struct { + // network that the default network interface belongs to. + Network string `json:"network,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this target instance. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +@@ -54660,6 +57028,10 @@ type TargetPool struct { + // resides. + Region string `json:"region,omitempty"` + ++ // SecurityPolicy: [Output Only] The resource URL for the security ++ // policy associated with this target pool. ++ SecurityPolicy string `json:"securityPolicy,omitempty"` ++ + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +@@ -55500,7 +57872,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -55598,7 +57972,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. 
Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -58415,12 +60791,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -58435,9 +60819,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -59113,6 +61497,16 @@ type VpnGateway struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + ++ // GatewayIpVersion: The IP family of the gateway IPs for the HA-VPN ++ // gateway interfaces. If not specified, IPV4 will be used. ++ // ++ // Possible values: ++ // "IPV4" - Every HA-VPN gateway interface is configured with an IPv4 ++ // address. ++ // "IPV6" - Every HA-VPN gateway interface is configured with an IPv6 ++ // address. ++ GatewayIpVersion string `json:"gatewayIpVersion,omitempty"` ++ + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. 
+ Id uint64 `json:"id,omitempty,string"` +@@ -59666,7 +62060,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -59699,8 +62093,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -59768,6 +62162,12 @@ type VpnGatewayVpnGatewayInterface struct { + // address must be a regional external IP address. + IpAddress string `json:"ipAddress,omitempty"` + ++ // Ipv6Address: [Output Only] IPv6 address for this VPN interface ++ // associated with the VPN gateway. The IPv6 address must be a regional ++ // external IPv6 address. The format is RFC 5952 format (e.g. ++ // 2001:db8::2d9:51:0:0). ++ Ipv6Address string `json:"ipv6Address,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -63303,6 +65703,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro + } + } + ++// method id "compute.addresses.move": ++ ++type AddressesMoveCall struct { ++ s *Service ++ project string ++ region string ++ address string ++ regionaddressesmoverequest *RegionAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++// - region: Name of the region for this request. ++func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { ++ c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.address = address ++ c.regionaddressesmoverequest = regionaddressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *AddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.addresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource.", ++ // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.addresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "request": { ++ // "$ref": "RegionAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.addresses.setLabels": + + type AddressesSetLabelsCall struct { +@@ -71499,6 +74087,182 @@ func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggrega + } + } + ++// method id "compute.disks.bulkInsert": ++ ++type DisksBulkInsertCall struct { ++ s *Service ++ project string ++ zone string ++ bulkinsertdiskresource *BulkInsertDiskResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// BulkInsert: Bulk create a set of disks. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *DisksService) BulkInsert(project string, zone string, bulkinsertdiskresource *BulkInsertDiskResource) *DisksBulkInsertCall { ++ c := &DisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.bulkinsertdiskresource = bulkinsertdiskresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksBulkInsertCall) RequestId(requestId string) *DisksBulkInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksBulkInsertCall) Fields(s ...googleapi.Field) *DisksBulkInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksBulkInsertCall) Context(ctx context.Context) *DisksBulkInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksBulkInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/bulkInsert") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.bulkInsert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *DisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Bulk create a set of disks.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.bulkInsert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/bulkInsert", ++ // "request": { ++ // "$ref": "BulkInsertDiskResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.disks.createSnapshot": + + type DisksCreateSnapshotCall struct { +@@ -73460,38 +76224,54 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.disks.testIamPermissions": ++// method id "compute.disks.startAsyncReplication": + +-type DisksTestIamPermissionsCall struct { +- s *Service +- project string +- zone string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type DisksStartAsyncReplicationCall struct { ++ s *Service ++ project string ++ zone string ++ disk string ++ disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// StartAsyncReplication: Starts asynchronous replication. Must be ++// invoked on the primary disk. + // ++// - disk: The name of the persistent disk. + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. + // - zone: The name of the zone for this request. +-func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { +- c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *DisksService) StartAsyncReplication(project string, zone string, disk string, disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest) *DisksStartAsyncReplicationCall { ++ c := &DisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.disk = disk ++ c.disksstartasyncreplicationrequest = disksstartasyncreplicationrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *DisksStartAsyncReplicationCall) RequestId(requestId string) *DisksStartAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { ++func (c *DisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStartAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -73499,21 +76279,21 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { ++func (c *DisksStartAsyncReplicationCall) Context(ctx context.Context) *DisksStartAsyncReplicationCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *DisksTestIamPermissionsCall) Header() http.Header { ++func (c *DisksStartAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *DisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -73521,14 +76301,14 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstartasyncreplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -73536,21 +76316,21 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "resource": c.resource, ++ "project": c.project, ++ "zone": c.zone, ++ "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.disks.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. 
+-func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.disks.startAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -73569,7 +76349,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -73581,16 +76361,23 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "description": "Starts asynchronous replication. Must be invoked on the primary disk.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + // "httpMethod": "POST", +- // "id": "compute.disks.testIamPermissions", ++ // "id": "compute.disks.startAsyncReplication", + // "parameterOrder": [ + // "project", + // "zone", +- // "resource" ++ // "disk" + // ], + // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -73598,11 +76385,9 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // }, + // "zone": { +@@ -73613,55 +76398,44 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + // "request": { +- // "$ref": "TestPermissionsRequest" ++ // "$ref": "DisksStartAsyncReplicationRequest" + // }, + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.disks.update": ++// method id "compute.disks.stopAsyncReplication": + +-type DisksUpdateCall struct { ++type DisksStopAsyncReplicationCall struct { + s *Service + project string + zone string + disk string +- disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Update: Updates the specified disk with the data included in the +-// request. The update is performed only on selected fields included as +-// part of update-mask. Only the following fields can be modified: +-// user_license. ++// StopAsyncReplication: Stops asynchronous replication. Can be invoked ++// either on the primary or on the secondary disk. + // +-// - disk: The disk name for this request. ++// - disk: The name of the persistent disk. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { +- c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *DisksService) StopAsyncReplication(project string, zone string, disk string) *DisksStopAsyncReplicationCall { ++ c := &DisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk +- c.disk2 = disk2 +- return c +-} +- +-// Paths sets the optional parameter "paths": +-func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { +- c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c + } + +@@ -73676,22 +76450,15 @@ func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) RequestId(requestId string) *DisksStopAsyncReplicationCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// UpdateMask sets the optional parameter "updateMask": update_mask +-// indicates fields to be updated as part of this request. +-func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { +- c.urlParams_.Set("updateMask", updateMask) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -73699,21 +76466,21 @@ func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { ++func (c *DisksStopAsyncReplicationCall) Context(ctx context.Context) *DisksStopAsyncReplicationCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *DisksUpdateCall) Header() http.Header { ++func (c *DisksStopAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { ++func (c *DisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -73721,16 +76488,11 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -73743,14 +76505,563 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.disks.update" call. ++// Do executes the "compute.disks.stopAsyncReplication" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *DisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.stopAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.stopGroupAsyncReplication": ++ ++type DisksStopGroupAsyncReplicationCall struct { ++ s *Service ++ project string ++ zone string ++ disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopGroupAsyncReplication: Stops asynchronous replication for a ++// consistency group of disks. Can be invoked either in the primary or ++// secondary scope. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
This must be the zone ++// of the primary or secondary disks in the consistency group. ++func (r *DisksService) StopGroupAsyncReplication(project string, zone string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *DisksStopGroupAsyncReplicationCall { ++ c := &DisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksStopGroupAsyncReplicationCall) RequestId(requestId string) *DisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *DisksStopGroupAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksStopGroupAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.stopGroupAsyncReplication" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.stopGroupAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request. 
This must be the zone of the primary or secondary disks in the consistency group.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", ++ // "request": { ++ // "$ref": "DisksStopGroupAsyncReplicationResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.testIamPermissions": ++ ++type DisksTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { ++ c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.disks.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.disks.update": ++ ++type DisksUpdateCall struct { ++ s *Service ++ project string ++ zone string ++ disk string ++ disk2 *Disk ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Update: Updates the specified disk with the data included in the ++// request. 
The update is performed only on selected fields included as ++// part of update-mask. Only the following fields can be modified: ++// user_license. ++// ++// - disk: The disk name for this request. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { ++ c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.disk = disk ++ c.disk2 = disk2 ++ return c ++} ++ ++// Paths sets the optional parameter "paths": ++func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { ++ c.urlParams_.SetMulti("paths", append([]string{}, paths...)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// UpdateMask sets the optional parameter "updateMask": update_mask ++// indicates fields to be updated as part of this request. ++func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { ++ c.urlParams_.Set("updateMask", updateMask) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *DisksUpdateCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.disks.update" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -81855,6 +85166,183 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList + } + } + ++// method id "compute.globalAddresses.move": ++ ++type GlobalAddressesMoveCall struct { ++ s *Service ++ project string ++ address string ++ globaladdressesmoverequest *GlobalAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource from one project to ++// another project. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { ++ c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.address = address ++ c.globaladdressesmoverequest = globaladdressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *GlobalAddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.globalAddresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource from one project to another project.", ++ // "flatPath": "projects/{project}/global/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.globalAddresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/addresses/{address}/move", ++ // "request": { ++ // "$ref": "GlobalAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.globalAddresses.setLabels": + + type GlobalAddressesSetLabelsCall struct { +@@ -97888,6 +101376,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Motifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Use instanceGroupManagers.patch instead.", + // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "httpMethod": "POST", +@@ -109088,32 +112577,33 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.instances.setServiceAccount": ++// method id "compute.instances.setSecurityPolicy": + +-type InstancesSetServiceAccountCall struct { ++type InstancesSetSecurityPolicyCall struct { + s *Service + project string + zone string + instance string +- instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetServiceAccount: Sets the service account on the instance. For more +-// information, read Changing the service account and access scopes for +-// an instance. ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified instance. For more information, see Google Cloud Armor ++// Overview + // +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { +- c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instance: Name of the Instance resource to which the security ++// policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - zone: Name of the zone scoping this request. ++func (r *InstancesService) SetSecurityPolicy(project string, zone string, instance string, instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest) *InstancesSetSecurityPolicyCall { ++ c := &InstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.instancessetserviceaccountrequest = instancessetserviceaccountrequest ++ c.instancessetsecuritypolicyrequest = instancessetsecuritypolicyrequest + return c + } + +@@ -109128,7 +112618,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) RequestId(requestId string) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109136,7 +112626,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109144,21 +112634,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { ++func (c *InstancesSetSecurityPolicyCall) Context(ctx context.Context) *InstancesSetSecurityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesSetServiceAccountCall) Header() http.Header { ++func (c *InstancesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109166,14 +112656,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetsecuritypolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -109188,14 +112678,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setServiceAccount" call. ++// Do executes the "compute.instances.setSecurityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109226,10 +112716,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + } + return ret, nil + // { +- // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "httpMethod": "POST", +- // "id": "compute.instances.setServiceAccount", ++ // "id": "compute.instances.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "zone", +@@ -109237,9 +112727,8 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // ], + // "parameters": { + // "instance": { +- // "description": "Name of the instance resource to start.", ++ // "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -109256,16 +112745,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", + // "request": { +- // "$ref": "InstancesSetServiceAccountRequest" ++ // "$ref": "InstancesSetSecurityPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -109278,33 +112767,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.instances.setShieldedInstanceIntegrityPolicy": ++// method id "compute.instances.setServiceAccount": + +-type InstancesSetShieldedInstanceIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetServiceAccountCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ instancessetserviceaccountrequest *InstancesSetServiceAccountRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +-// integrity policy for an instance. You can only use this method on a +-// running instance. This method supports PATCH semantics and uses the +-// JSON merge patch format and processing rules. ++// SetServiceAccount: Sets the service account on the instance. For more ++// information, read Changing the service account and access scopes for ++// an instance. + // +-// - instance: Name or id of the instance scoping this request. ++// - instance: Name of the instance resource to start. 
+ // - project: Project ID for this request. + // - zone: The name of the zone for this request. +-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { +- c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { ++ c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy ++ c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c + } + +@@ -109319,7 +112807,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109327,7 +112815,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109335,21 +112823,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetServiceAccountCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109357,16 +112845,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -109379,14 +112867,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. ++// Do executes the "compute.instances.setServiceAccount" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109417,10 +112905,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", +- // "httpMethod": "PATCH", +- // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.setServiceAccount", + // "parameterOrder": [ + // "project", + // "zone", +@@ -109428,7 +112916,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // ], + // "parameters": { + // "instance": { +- // "description": "Name or id of the instance scoping this request.", ++ // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -109454,9 +112942,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { +- // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // "$ref": "InstancesSetServiceAccountRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -109469,33 +112957,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C + + } + +-// method id "compute.instances.setShieldedVmIntegrityPolicy": ++// method id "compute.instances.setShieldedInstanceIntegrityPolicy": + +-type InstancesSetShieldedVmIntegrityPolicyCall struct { +- s *Service +- project string +- zone string +- instance string +- shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstancesSetShieldedInstanceIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +-// for a VM instance. You can only use this method on a running VM +-// instance. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance ++// integrity policy for an instance. You can only use this method on a ++// running instance. This method supports PATCH semantics and uses the ++// JSON merge patch format and processing rules. + // +-// - instance: Name of the instance scoping this request. ++// - instance: Name or id of the instance scoping this request. + // - project: Project ID for this request. + // - zone: The name of the zone for this request. 
+-func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { +- c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { ++ c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance +- c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c + } + +@@ -109510,7 +112998,7 @@ func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -109518,7 +113006,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -109526,21 +113014,21 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -109548,14 +113036,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -109570,14 +113058,205 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "httpMethod": "PATCH", ++ // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "instance": { ++ // "description": "Name or id of the instance scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", ++ // "request": { ++ // "$ref": "ShieldedInstanceIntegrityPolicy" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.setShieldedVmIntegrityPolicy": ++ ++type InstancesSetShieldedVmIntegrityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy ++// for a VM instance. You can only use this method on a running VM ++// instance. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. ++// ++// - instance: Name of the instance scoping this request. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ c.shieldedvmintegritypolicy = shieldedvmintegritypolicy ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -109875,6 +113554,22 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + return c + } + ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +@@ -109987,6 +113682,11 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) + // "required": true, + // "type": "string" + // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", +@@ -112174,9 +115874,9 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.interconnectAttachments.aggregatedList": ++// method id "compute.instantSnapshots.aggregatedList": + +-type InterconnectAttachmentsAggregatedListCall struct { ++type InstantSnapshotsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -112185,12 +115885,11 @@ type InterconnectAttachmentsAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves an aggregated list of interconnect +-// attachments. ++// AggregatedList: Retrieves an aggregated list of instantSnapshots. + // + // - project: Project ID for this request. +-func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { +- c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InstantSnapshotsService) AggregatedList(project string) *InstantSnapshotsAggregatedListCall { ++ c := &InstantSnapshotsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -112230,7 +115929,7 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Filter(filter string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -112243,7 +115942,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *Inter + // response. For resource types which predate this field, if this flag + // is omitted or false, only scopes of the scope types where the + // resource type is expected to be found will be included. +-func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c + } +@@ -112254,7 +115953,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllS + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) MaxResults(maxResults int64) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -112268,7 +115967,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. 
Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) OrderBy(orderBy string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -112276,7 +115975,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *Int + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) PageToken(pageToken string) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -112285,7 +115984,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -112293,7 +115992,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnP + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Fields(s ...googleapi.Field) *InstantSnapshotsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112303,7 +116002,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) IfNoneMatch(entityTag string) *InstantSnapshotsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -112311,21 +116010,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { ++func (c *InstantSnapshotsAggregatedListCall) Context(ctx context.Context) *InstantSnapshotsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { ++func (c *InstantSnapshotsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112338,7 +116037,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -112351,15 +116050,14 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.aggregatedList" call. +-// Exactly one of *InterconnectAttachmentAggregatedList or error will be ++// Do executes the "compute.instantSnapshots.aggregatedList" call. ++// Exactly one of *InstantSnapshotAggregatedList or error will be + // non-nil. Any non-2xx status code is an error. Response headers are in +-// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { ++// either *InstantSnapshotAggregatedList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InstantSnapshotsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112378,7 +116076,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachmentAggregatedList{ ++ ret := &InstantSnapshotAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -112390,10 +116088,10 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Retrieves an aggregated list of interconnect attachments.", +- // "flatPath": "projects/{project}/aggregated/interconnectAttachments", ++ // "description": "Retrieves an aggregated list of instantSnapshots.", ++ // "flatPath": "projects/{project}/aggregated/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.aggregatedList", ++ // "id": "compute.instantSnapshots.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -112439,9 +116137,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/interconnectAttachments", ++ // "path": "projects/{project}/aggregated/instantSnapshots", + // "response": { +- // "$ref": "InterconnectAttachmentAggregatedList" ++ // "$ref": "InstantSnapshotAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -112455,7 +116153,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { ++func (c *InstantSnapshotsAggregatedListCall) Pages(ctx context.Context, f func(*InstantSnapshotAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -112473,29 +116171,33 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f + } + } + +-// method id "compute.interconnectAttachments.delete": ++// method id "compute.instantSnapshots.delete": + +-type InterconnectAttachmentsDeleteCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified interconnect attachment. ++// Delete: Deletes the specified InstantSnapshot resource. Keep in mind ++// that deleting a single instantSnapshot might not necessarily delete ++// all the data on that instantSnapshot. If any data on the ++// instantSnapshot that is marked for deletion is needed for subsequent ++// instantSnapshots, the data will be moved to the next corresponding ++// instantSnapshot. For more information, see Deleting instantSnapshots. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// delete. +-// - project: Project ID for this request. 
+-// - region: Name of the region for this request. +-func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { +- c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instantSnapshot: Name of the InstantSnapshot resource to delete. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Delete(project string, zone string, instantSnapshot string) *InstantSnapshotsDeleteCall { ++ c := &InstantSnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot + return c + } + +@@ -112510,7 +116212,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) RequestId(requestId string) *InstantSnapshotsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -112518,7 +116220,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) Fields(s ...googleapi.Field) *InstantSnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112526,21 +116228,21 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { ++func (c *InstantSnapshotsDeleteCall) Context(ctx context.Context) *InstantSnapshotsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { ++func (c *InstantSnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112550,7 +116252,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -112558,21 +116260,21 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.delete" call. ++// Do executes the "compute.instantSnapshots.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112603,18 +116305,18 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Deletes the specified interconnect attachment.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. 
For more information, see Deleting instantSnapshots.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "DELETE", +- // "id": "compute.interconnectAttachments.delete", ++ // "id": "compute.instantSnapshots.delete", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "instantSnapshot" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to delete.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -112627,20 +116329,209 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instantSnapshots.export": ++ ++type InstantSnapshotsExportCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ instantsnapshotsexportrequest *InstantSnapshotsExportRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Export: Export the changed blocks between two instant snapshots to a ++// customer's bucket in the user specified format. ++// ++// - instantSnapshot: Name of the instant snapshot to export. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Export(project string, zone string, instantSnapshot string, instantsnapshotsexportrequest *InstantSnapshotsExportRequest) *InstantSnapshotsExportCall { ++ c := &InstantSnapshotsExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot ++ c.instantsnapshotsexportrequest = instantsnapshotsexportrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. 
Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstantSnapshotsExportCall) RequestId(requestId string) *InstantSnapshotsExportCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstantSnapshotsExportCall) Fields(s ...googleapi.Field) *InstantSnapshotsExportCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstantSnapshotsExportCall) Context(ctx context.Context) *InstantSnapshotsExportCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstantSnapshotsExportCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstantSnapshotsExportCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshotsexportrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instantSnapshots.export" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstantSnapshotsExportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ // "httpMethod": "POST", ++ // "id": "compute.instantSnapshots.export", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instantSnapshot" ++ // ], ++ // "parameters": { ++ // "instantSnapshot": { ++ // "description": "Name of the instant snapshot to export.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}/export", ++ // "request": { ++ // "$ref": "InstantSnapshotsExportRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -112652,37 +116543,37 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.interconnectAttachments.get": ++// method id "compute.instantSnapshots.get": + +-type InterconnectAttachmentsGetCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsGetCall struct { ++ s *Service ++ project string ++ zone string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified interconnect attachment. ++// Get: Returns the specified InstantSnapshot resource in the specified ++// zone. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// return. +-// - project: Project ID for this request. +-// - region: Name of the region for this request. +-func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { +- c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instantSnapshot: Name of the InstantSnapshot resource to return. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) Get(project string, zone string, instantSnapshot string) *InstantSnapshotsGetCall { ++ c := &InstantSnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment ++ c.zone = zone ++ c.instantSnapshot = instantSnapshot + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) Fields(s ...googleapi.Field) *InstantSnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112692,7 +116583,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) IfNoneMatch(entityTag string) *InstantSnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -112700,21 +116591,21 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { ++func (c *InstantSnapshotsGetCall) Context(ctx context.Context) *InstantSnapshotsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsGetCall) Header() http.Header { ++func (c *InstantSnapshotsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112727,7 +116618,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -112735,21 +116626,21 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.get" call. +-// Exactly one of *InterconnectAttachment or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectAttachment.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.instantSnapshots.get" call. ++// Exactly one of *InstantSnapshot or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *InstantSnapshot.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { ++func (c *InstantSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*InstantSnapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112768,7 +116659,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachment{ ++ ret := &InstantSnapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -112780,18 +116671,18 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + } + return ret, nil + // { +- // "description": "Returns the specified interconnect attachment.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "description": "Returns the specified InstantSnapshot resource in the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.get", ++ // "id": "compute.instantSnapshots.get", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "instantSnapshot" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to return.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -112804,17 +116695,17 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "InstantSnapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -112825,28 +116716,213 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte + + } + +-// method id "compute.interconnectAttachments.insert": ++// method id "compute.instantSnapshots.getIamPolicy": + +-type InterconnectAttachmentsInsertCall struct { +- s *Service +- project string +- region string +- interconnectattachment *InterconnectAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsGetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an InterconnectAttachment in the specified project +-// using the data included in the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. +-// - region: Name of the region for this request. 
+-func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { +- c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) GetIamPolicy(project string, zone string, resource string) *InstantSnapshotsGetIamPolicyCall { ++ c := &InstantSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectattachment = interconnectattachment ++ c.zone = zone ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *InstantSnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *InstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstantSnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *InstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InstantSnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *InstantSnapshotsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstantSnapshotsGetIamPolicyCall) Context(ctx context.Context) *InstantSnapshotsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstantSnapshotsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstantSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instantSnapshots.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *InstantSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.instantSnapshots.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instantSnapshots.insert": ++ ++type InstantSnapshotsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ instantsnapshot *InstantSnapshot ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ 
header_ http.Header ++} ++ ++// Insert: Creates an instant snapshot in the specified zone. ++// ++// - project: Project ID for this request. ++// - zone: Name of the zone for this request. ++func (r *InstantSnapshotsService) Insert(project string, zone string, instantsnapshot *InstantSnapshot) *InstantSnapshotsInsertCall { ++ c := &InstantSnapshotsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instantsnapshot = instantsnapshot + return c + } + +@@ -112861,22 +116937,15 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) RequestId(requestId string) *InstantSnapshotsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// ValidateOnly sets the optional parameter "validateOnly": If true, the +-// request will not be committed. +-func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { +- c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) Fields(s ...googleapi.Field) *InstantSnapshotsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -112884,21 +116953,21 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { ++func (c *InstantSnapshotsInsertCall) Context(ctx context.Context) *InstantSnapshotsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectAttachmentsInsertCall) Header() http.Header { ++func (c *InstantSnapshotsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -112906,14 +116975,14 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -112922,19 +116991,19 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.insert" call. ++// Do executes the "compute.instantSnapshots.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -112965,13 +117034,13 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "description": "Creates an instant snapshot in the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.insert", ++ // "id": "compute.instantSnapshots.insert", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "project": { +@@ -112981,27 +117050,22 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, +- // "validateOnly": { +- // "description": "If true, the request will not be committed.", +- // "location": "query", +- // "type": "boolean" ++ // "zone": { ++ // "description": "Name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots", + // "request": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "InstantSnapshot" + // }, + // "response": { + // "$ref": "Operation" +@@ -113014,27 +117078,27 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.interconnectAttachments.list": ++// method id "compute.instantSnapshots.list": + +-type InterconnectAttachmentsListCall struct { ++type InstantSnapshotsListCall struct { + s *Service + project string +- region string ++ zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves the list of interconnect attachments contained within +-// the specified region. ++// List: Retrieves the list of InstantSnapshot resources contained ++// within the specified zone. + // + // - project: Project ID for this request. +-// - region: Name of the region for this request. 
+-func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { +- c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) List(project string, zone string) *InstantSnapshotsListCall { ++ c := &InstantSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + return c + } + +@@ -113073,7 +117137,7 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Filter(filter string) *InstantSnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -113084,7 +117148,7 @@ func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAtt + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) MaxResults(maxResults int64) *InstantSnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -113098,7 +117162,7 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) OrderBy(orderBy string) *InstantSnapshotsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -113106,7 +117170,7 @@ func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectA + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) PageToken(pageToken string) *InstantSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -113115,7 +117179,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstantSnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -113123,7 +117187,7 @@ func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSucc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Fields(s ...googleapi.Field) *InstantSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113133,7 +117197,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) IfNoneMatch(entityTag string) *InstantSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -113141,21 +117205,21 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { ++func (c *InstantSnapshotsListCall) Context(ctx context.Context) *InstantSnapshotsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsListCall) Header() http.Header { ++func (c *InstantSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113168,7 +117232,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -113177,19 +117241,19 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.list" call. +-// Exactly one of *InterconnectAttachmentList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *InterconnectAttachmentList.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.instantSnapshots.list" call. ++// Exactly one of *InstantSnapshotList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstantSnapshotList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { ++func (c *InstantSnapshotsListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113208,7 +117272,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectAttachmentList{ ++ ret := &InstantSnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -113220,13 +117284,13 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + } + return ret, nil + // { +- // "description": "Retrieves the list of interconnect attachments contained within the specified region.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "description": "Retrieves the list of InstantSnapshot resources contained within the specified zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.interconnectAttachments.list", ++ // "id": "compute.instantSnapshots.list", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "filter": { +@@ -113259,22 +117323,22 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots", + // "response": { +- // "$ref": "InterconnectAttachmentList" ++ // "$ref": "InstantSnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -113288,7 +117352,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { ++func (c *InstantSnapshotsListCall) Pages(ctx context.Context, f func(*InstantSnapshotList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -113306,56 +117370,38 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int + } + } + +-// method id "compute.interconnectAttachments.patch": ++// method id "compute.instantSnapshots.setIamPolicy": + +-type InterconnectAttachmentsPatchCall struct { +- s *Service +- project string +- region string +- interconnectAttachment string +- interconnectattachment *InterconnectAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsSetIamPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetpolicyrequest *ZoneSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates the specified interconnect attachment with the data +-// included in the request. This method supports PATCH semantics and +-// uses the JSON merge patch format and processing rules. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - interconnectAttachment: Name of the interconnect attachment to +-// patch. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { +- c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstantSnapshotsSetIamPolicyCall { ++ c := &InstantSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.interconnectAttachment = interconnectAttachment +- c.interconnectattachment = interconnectattachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { +- c.urlParams_.Set("requestId", requestId) ++ c.zone = zone ++ c.resource = resource ++ c.zonesetpolicyrequest = zonesetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { ++func (c *InstantSnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *InstantSnapshotsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113363,21 +117409,21 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { ++func (c *InstantSnapshotsSetIamPolicyCall) Context(ctx context.Context) *InstantSnapshotsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsPatchCall) Header() http.Header { ++func (c *InstantSnapshotsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113385,36 +117431,36 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "interconnectAttachment": c.interconnectAttachment, ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.patch" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.instantSnapshots.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *InstantSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113433,7 +117479,7 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -113445,23 +117491,16 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + } + return ret, nil + // { +- // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", +- // "httpMethod": "PATCH", +- // "id": "compute.interconnectAttachments.patch", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.instantSnapshots.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "region", +- // "interconnectAttachment" ++ // "zone", ++ // "resource" + // ], + // "parameters": { +- // "interconnectAttachment": { +- // "description": "Name of the interconnect attachment to patch.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -113469,25 +117508,27 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region scoping this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setIamPolicy", + // "request": { +- // "$ref": "InterconnectAttachment" ++ // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -113497,31 +117538,31 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op + + } + +-// method id "compute.interconnectAttachments.setLabels": ++// method id "compute.instantSnapshots.setLabels": + +-type InterconnectAttachmentsSetLabelsCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetlabelsrequest *RegionSetLabelsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InstantSnapshotsSetLabelsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ zonesetlabelsrequest *ZoneSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetLabels: Sets the labels on an InterconnectAttachment. To learn +-// more about labels, read the Labeling Resources documentation. ++// SetLabels: Sets the labels on a instantSnapshot in the given zone. To ++// learn more about labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. +-// - region: The region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { +- c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *InstantSnapshotsSetLabelsCall { ++ c := &InstantSnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + c.resource = resource +- c.regionsetlabelsrequest = regionsetlabelsrequest ++ c.zonesetlabelsrequest = zonesetlabelsrequest + return c + } + +@@ -113536,7 +117577,7 @@ func (r *InterconnectAttachmentsService) SetLabels(project string, region string + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) RequestId(requestId string) *InstantSnapshotsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -113544,7 +117585,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *Inte + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *InstantSnapshotsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113552,21 +117593,21 @@ func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *Int + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { ++func (c *InstantSnapshotsSetLabelsCall) Context(ctx context.Context) *InstantSnapshotsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { ++func (c *InstantSnapshotsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113574,14 +117615,14 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -113590,20 +117631,20 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.setLabels" call. ++// Do executes the "compute.instantSnapshots.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstantSnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113634,13 +117675,13 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ // "description": "Sets the labels on a instantSnapshot in the given zone. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.setLabels", ++ // "id": "compute.instantSnapshots.setLabels", + // "parameterOrder": [ + // "project", +- // "region", ++ // "zone", + // "resource" + // ], + // "parameters": { +@@ -113651,13 +117692,6 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "The region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +@@ -113669,11 +117703,18 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/setLabels", + // "request": { +- // "$ref": "RegionSetLabelsRequest" ++ // "$ref": "ZoneSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -113686,12 +117727,12 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.interconnectAttachments.testIamPermissions": ++// method id "compute.instantSnapshots.testIamPermissions": + +-type InterconnectAttachmentsTestIamPermissionsCall struct { ++type InstantSnapshotsTestIamPermissionsCall struct { + s *Service + project string +- region string ++ zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams +@@ -113703,12 +117744,12 @@ type InterconnectAttachmentsTestIamPermissionsCall struct { + // specified resource. + // + // - project: Project ID for this request. +-// - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { +- c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *InstantSnapshotsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstantSnapshotsTestIamPermissionsCall { ++ c := &InstantSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +@@ -113717,7 +117758,7 @@ func (r *InterconnectAttachmentsService) TestIamPermissions(project string, regi + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { ++func (c *InstantSnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstantSnapshotsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -113725,21 +117766,21 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { ++func (c *InstantSnapshotsTestIamPermissionsCall) Context(ctx context.Context) *InstantSnapshotsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { ++func (c *InstantSnapshotsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstantSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -113754,7 +117795,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -113763,20 +117804,20 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectAttachments.testIamPermissions" call. ++// Do executes the "compute.instantSnapshots.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InstantSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -113808,12 +117849,12 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.interconnectAttachments.testIamPermissions", ++ // "id": "compute.instantSnapshots.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "region", ++ // "zone", + // "resource" + // ], + // "parameters": { +@@ -113824,22 +117865,22 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "The name of the region for this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/instantSnapshots/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -113855,171 +117896,9 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.interconnectLocations.get": +- +-type InterconnectLocationsGetCall struct { +- s *Service +- project string +- interconnectLocation string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header +-} +- +-// Get: Returns the details for the specified interconnect location. +-// Gets a list of available interconnect locations by making a list() +-// request. +-// +-// - interconnectLocation: Name of the interconnect location to return. +-// - project: Project ID for this request. +-func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { +- c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.interconnectLocation = interconnectLocation +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. 
Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *InterconnectLocationsGetCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnectLocation": c.interconnectLocation, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.interconnectLocations.get" call. +-// Exactly one of *InterconnectLocation or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectLocation.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &InterconnectLocation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", +- // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- // "httpMethod": "GET", +- // "id": "compute.interconnectLocations.get", +- // "parameterOrder": [ +- // "project", +- // "interconnectLocation" +- // ], +- // "parameters": { +- // "interconnectLocation": { +- // "description": "Name of the interconnect location to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", +- // "response": { +- // "$ref": "InterconnectLocation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.interconnectLocations.list": ++// method id "compute.interconnectAttachments.aggregatedList": + +-type InterconnectLocationsListCall struct { ++type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -114028,12 +117907,12 @@ type InterconnectLocationsListCall struct { + header_ http.Header + } + +-// List: Retrieves the list of interconnect locations available to the +-// specified project. ++// AggregatedList: Retrieves an aggregated list of interconnect ++// attachments. + // + // - project: Project ID for this request. +-func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { +- c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { ++ c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -114073,18 +117952,31 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } + ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. 
++func (c *InterconnectAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InterconnectAttachmentsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -114098,7 +117990,7 @@ func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *Interconne + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -114106,7 +117998,7 @@ func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLoc + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -114115,7 +118007,7 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -114123,7 +118015,7 @@ func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSucces + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114133,7 +118025,7 @@ func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *Interconne + // getting updates only after the object has changed since the last + // request. 
Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -114141,21 +118033,21 @@ func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *Interconn + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { ++func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectLocationsListCall) Header() http.Header { ++func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114168,7 +118060,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -114181,14 +118073,15 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnectLocations.list" call. +-// Exactly one of *InterconnectLocationList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *InterconnectLocationList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { ++// Do executes the "compute.interconnectAttachments.aggregatedList" call. ++// Exactly one of *InterconnectAttachmentAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. 
++func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114207,7 +118100,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectLocationList{ ++ ret := &InterconnectAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -114219,10 +118112,10 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + } + return ret, nil + // { +- // "description": "Retrieves the list of interconnect locations available to the specified project.", +- // "flatPath": "projects/{project}/global/interconnectLocations", ++ // "description": "Retrieves an aggregated list of interconnect attachments.", ++ // "flatPath": "projects/{project}/aggregated/interconnectAttachments", + // "httpMethod": "GET", +- // "id": "compute.interconnectLocations.list", ++ // "id": "compute.interconnectAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -114232,6 +118125,11 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // "location": "query", + // "type": "string" + // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -114263,9 +118161,9 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnectLocations", ++ // "path": "projects/{project}/aggregated/interconnectAttachments", + // "response": { +- // "$ref": "InterconnectLocationList" ++ // "$ref": "InterconnectAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -114279,7 +118177,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { ++func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -114297,25 +118195,29 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter + } + } + +-// method id "compute.interconnects.delete": ++// method id "compute.interconnectAttachments.delete": + +-type InterconnectsDeleteCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified Interconnect. ++// Delete: Deletes the specified interconnect attachment. + // +-// - interconnect: Name of the interconnect to delete. +-// - project: Project ID for this request. +-func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { +- c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// delete. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { ++ c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment + return c + } + +@@ -114330,7 +118232,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -114338,7 +118240,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114346,21 +118248,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { ++func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsDeleteCall) Header() http.Header { ++func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114370,7 +118272,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -114378,20 +118280,21 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.delete" call. ++// Do executes the "compute.interconnectAttachments.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114422,17 +118325,18 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Deletes the specified Interconnect.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Deletes the specified interconnect attachment.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "DELETE", +- // "id": "compute.interconnects.delete", ++ // "id": "compute.interconnectAttachments.delete", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to delete.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -114445,13 +118349,20 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "Operation" + // }, +@@ -114463,34 +118374,37 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.interconnects.get": ++// method id "compute.interconnectAttachments.get": + +-type InterconnectsGetCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified Interconnect. Get a list of available +-// Interconnects by making a list() request. ++// Get: Returns the specified interconnect attachment. + // +-// - interconnect: Name of the interconnect to return. +-// - project: Project ID for this request. 
+-func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { +- c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// return. ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { ++ c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114500,7 +118414,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -114508,21 +118422,21 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { ++func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsGetCall) Header() http.Header { ++func (c *InterconnectAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114535,7 +118449,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -114543,20 +118457,21 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.get" call. +-// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Interconnect.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { ++// Do executes the "compute.interconnectAttachments.get" call. ++// Exactly one of *InterconnectAttachment or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InterconnectAttachment.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114575,7 +118490,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Interconnect{ ++ ret := &InterconnectAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -114587,17 +118502,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + } + return ret, nil + // { +- // "description": "Returns the specified Interconnect. 
Get a list of available Interconnects by making a list() request.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Returns the specified interconnect attachment.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "GET", +- // "id": "compute.interconnects.get", ++ // "id": "compute.interconnectAttachments.get", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to return.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -114609,173 +118525,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", +- // "response": { +- // "$ref": "Interconnect" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.interconnects.getDiagnostics": +- +-type InterconnectsGetDiagnosticsCall struct { +- s *Service +- project string +- interconnect string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header +-} +- +-// GetDiagnostics: Returns the interconnectDiagnostics for the specified +-// Interconnect. +-// +-// - interconnect: Name of the interconnect resource to query. +-// - project: Project ID for this request. +-func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { +- c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.interconnect = interconnect +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. 
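As an aside from the hunk above, the IfNoneMatch plumbing is easiest to read in use. A minimal sketch, assuming the vendored package is imported as compute (the exact vendored path is not shown in this patch) and that svc is an already-constructed *compute.Service; the etag value would come from an earlier response:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path; the vendored copy in this tree may differ
	"google.golang.org/api/googleapi"
)

// getIfChanged performs a conditional read of a regional interconnect
// attachment. When the server reports the object unchanged for the given
// ETag, Do returns an error that googleapi.IsNotModified recognizes, as
// the doc comments above describe.
func getIfChanged(ctx context.Context, svc *compute.Service, project, region, name, etag string) (*compute.InterconnectAttachment, error) {
	att, err := svc.InterconnectAttachments.Get(project, region, name).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the supplied ETag
	}
	return att, err
}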
+-func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.interconnects.getDiagnostics" call. +-// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &InterconnectsGetDiagnosticsResponse{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", +- // "httpMethod": "GET", +- // "id": "compute.interconnects.getDiagnostics", +- // "parameterOrder": [ +- // "project", +- // "interconnect" +- // ], +- // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect resource to query.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" + // }, +- // "project": { +- // "description": "Project ID for this request.", ++ // "region": { ++ // "description": "Name of the region for this request.", + // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { +- // "$ref": "InterconnectsGetDiagnosticsResponse" ++ // "$ref": "InterconnectAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -114786,25 +118547,28 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int + + } + +-// method id "compute.interconnects.insert": ++// method id "compute.interconnectAttachments.insert": + +-type InterconnectsInsertCall struct { +- s *Service +- project string +- interconnect *Interconnect +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ interconnectattachment *InterconnectAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an Interconnect in the specified project using the +-// data included in the request. ++// Insert: Creates an InterconnectAttachment in the specified project ++// using the data included in the request. + // + // - project: Project ID for this request. +-func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { +- c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. 
++func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { ++ c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect ++ c.region = region ++ c.interconnectattachment = interconnectattachment + return c + } + +@@ -114819,15 +118583,22 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -114835,21 +118606,21 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { ++func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectsInsertCall) Header() http.Header { ++func (c *InterconnectAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -114857,14 +118628,14 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -114873,18 +118644,19 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.insert" call. ++// Do executes the "compute.interconnectAttachments.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -114915,12 +118687,13 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Creates an Interconnect in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/global/interconnects", ++ // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "httpMethod": "POST", +- // "id": "compute.interconnects.insert", ++ // "id": "compute.interconnectAttachments.insert", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "project": { +@@ -114930,15 +118703,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnects", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "request": { +- // "$ref": "Interconnect" ++ // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -114951,24 +118736,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.interconnects.list": ++// method id "compute.interconnectAttachments.list": + +-type InterconnectsListCall struct { ++type InterconnectAttachmentsListCall struct { + s *Service + project string ++ region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves the list of Interconnects available to the specified +-// project. ++// List: Retrieves the list of interconnect attachments contained within ++// the specified region. + // + // - project: Project ID for this request. +-func (r *InterconnectsService) List(project string) *InterconnectsListCall { +- c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region for this request. 
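The ValidateOnly and RequestId options added above suggest a dry-run-then-commit pattern. A hedged sketch under the same assumed imports as the earlier Get example; the attachment's Name value and the caller-supplied requestID (expected to be a UUID, per the doc comment) are illustrative and not taken from this patch:

// createAttachment first validates the insert without committing it, then
// commits with an idempotent request ID.
func createAttachment(ctx context.Context, svc *compute.Service, project, region, requestID string) (*compute.Operation, error) {
	att := &compute.InterconnectAttachment{Name: "example-attachment"} // illustrative field only

	// Dry run: with validateOnly=true the request is checked but not committed.
	if _, err := svc.InterconnectAttachments.Insert(project, region, att).
		ValidateOnly(true).
		Context(ctx).
		Do(); err != nil {
		return nil, err
	}

	// Commit with a request ID so a retry of the same call is ignored by
	// the server instead of creating a duplicate attachment.
	return svc.InterconnectAttachments.Insert(project, region, att).
		RequestId(requestID).
		Context(ctx).
		Do()
}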
++func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { ++ c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + return c + } + +@@ -115007,7 +118795,7 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -115018,7 +118806,7 @@ func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -115032,7 +118820,7 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -115040,7 +118828,7 @@ func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -115049,7 +118837,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -115057,7 +118845,7 @@ func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115067,7 +118855,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -115075,21 +118863,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { ++func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsListCall) Header() http.Header { ++func (c *InterconnectAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115102,7 +118890,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -115111,18 +118899,19 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.list" call. +-// Exactly one of *InterconnectList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InterconnectList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.interconnectAttachments.list" call. ++// Exactly one of *InterconnectAttachmentList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectAttachmentList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { ++func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115141,7 +118930,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InterconnectList{ ++ ret := &InterconnectAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -115153,12 +118942,13 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + } + return ret, nil + // { +- // "description": "Retrieves the list of Interconnects available to the specified project.", +- // "flatPath": "projects/{project}/global/interconnects", ++ // "description": "Retrieves the list of interconnect attachments contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments", + // "httpMethod": "GET", +- // "id": "compute.interconnects.list", ++ // "id": "compute.interconnectAttachments.list", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -115191,15 +118981,22 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/interconnects", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments", + // "response": { +- // "$ref": "InterconnectList" ++ // "$ref": "InterconnectAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -115213,7 +119010,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
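Because the Pages helper above drives the page loop itself, callers normally never handle pageToken directly. A minimal usage sketch with the same assumed imports; the Items and Name fields on the list response are assumptions that do not appear in this hunk:

// attachmentNames collects the names of all interconnect attachments in a
// region, letting Pages fetch successive pages until nextPageToken is empty.
func attachmentNames(ctx context.Context, svc *compute.Service, project, region string) ([]string, error) {
	var names []string
	err := svc.InterconnectAttachments.List(project, region).
		MaxResults(100).
		Pages(ctx, func(page *compute.InterconnectAttachmentList) error {
			for _, a := range page.Items {
				names = append(names, a.Name)
			}
			return nil // a non-nil error here halts the iteration, per the Pages doc
		})
	return names, err
}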
+-func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { ++func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -115231,29 +119028,33 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL + } + } + +-// method id "compute.interconnects.patch": ++// method id "compute.interconnectAttachments.patch": + +-type InterconnectsPatchCall struct { +- s *Service +- project string +- interconnect string +- interconnect2 *Interconnect +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectAttachmentsPatchCall struct { ++ s *Service ++ project string ++ region string ++ interconnectAttachment string ++ interconnectattachment *InterconnectAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates the specified Interconnect with the data included in +-// the request. This method supports PATCH semantics and uses the JSON +-// merge patch format and processing rules. ++// Patch: Updates the specified interconnect attachment with the data ++// included in the request. This method supports PATCH semantics and ++// uses the JSON merge patch format and processing rules. + // +-// - interconnect: Name of the interconnect to update. +-// - project: Project ID for this request. +-func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { +- c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectAttachment: Name of the interconnect attachment to ++// patch. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { ++ c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.interconnect = interconnect +- c.interconnect2 = interconnect2 ++ c.region = region ++ c.interconnectAttachment = interconnectAttachment ++ c.interconnectattachment = interconnectattachment + return c + } + +@@ -115268,7 +119069,7 @@ func (r *InterconnectsService) Patch(project string, interconnect string, interc + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -115276,7 +119077,7 @@ func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatch + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115284,21 +119085,21 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { ++func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsPatchCall) Header() http.Header { ++func (c *InterconnectAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115306,14 +119107,14 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { +@@ -115321,20 +119122,21 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "interconnect": c.interconnect, ++ "project": c.project, ++ "region": c.region, ++ "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.patch" call. ++// Do executes the "compute.interconnectAttachments.patch" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115365,17 +119167,18 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + } + return ret, nil + // { +- // "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", +- // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "httpMethod": "PATCH", +- // "id": "compute.interconnects.patch", ++ // "id": "compute.interconnectAttachments.patch", + // "parameterOrder": [ + // "project", +- // "interconnect" ++ // "region", ++ // "interconnectAttachment" + // ], + // "parameters": { +- // "interconnect": { +- // "description": "Name of the interconnect to update.", ++ // "interconnectAttachment": { ++ // "description": "Name of the interconnect attachment to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -115388,15 +119191,22 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "request": { +- // "$ref": "Interconnect" ++ // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -115409,35 +119219,54 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e + + } + +-// method id "compute.interconnects.setLabels": ++// method id "compute.interconnectAttachments.setLabels": + +-type InterconnectsSetLabelsCall struct { ++type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string ++ region string + resource string +- globalsetlabelsrequest *GlobalSetLabelsRequest ++ regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetLabels: Sets the labels on an Interconnect. To learn more about +-// labels, read the Labeling Resources documentation. 
++// SetLabels: Sets the labels on an InterconnectAttachment. To learn ++// more about labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. ++// - region: The region for this request. + // - resource: Name or id of the resource for this request. +-func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { +- c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { ++ c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + c.resource = resource +- c.globalsetlabelsrequest = globalsetlabelsrequest ++ c.regionsetlabelsrequest = regionsetlabelsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { ++func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115445,21 +119274,21 @@ func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *Interconnects + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { ++func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *InterconnectsSetLabelsCall) Header() http.Header { ++func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115467,14 +119296,14 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -115483,19 +119312,20 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.setLabels" call. ++// Do executes the "compute.interconnectAttachments.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115526,12 +119356,13 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + } + return ret, nil + // { +- // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", +- // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", ++ // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.interconnects.setLabels", ++ // "id": "compute.interconnectAttachments.setLabels", + // "parameterOrder": [ + // "project", ++ // "region", + // "resource" + // ], + // "parameters": { +@@ -115542,17 +119373,29 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "The region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{resource}/setLabels", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { +- // "$ref": "GlobalSetLabelsRequest" ++ // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -115565,11 +119408,12 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio + + } + +-// method id "compute.interconnects.testIamPermissions": ++// method id "compute.interconnectAttachments.testIamPermissions": + +-type InterconnectsTestIamPermissionsCall struct { ++type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string ++ region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams +@@ -115581,10 +119425,12 @@ type InterconnectsTestIamPermissionsCall struct { + // specified resource. + // + // - project: Project ID for this request. ++// - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. 
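The switch from GlobalSetLabelsRequest to RegionSetLabelsRequest above changes only the request body type and adds the region path parameter; the call shape is otherwise unchanged. A sketch under the same assumed imports; the Labels and LabelFingerprint fields follow the usual shape of these request types and are assumptions here:

// labelAttachment applies labels to a regional interconnect attachment.
// The fingerprint guards against concurrent label updates and is typically
// taken from a prior Get of the same resource.
func labelAttachment(ctx context.Context, svc *compute.Service, project, region, name, fingerprint string) (*compute.Operation, error) {
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: fingerprint,
		Labels:           map[string]string{"env": "test"},
	}
	return svc.InterconnectAttachments.SetLabels(project, region, name, req).
		Context(ctx).
		Do()
}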
+-func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { +- c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { ++ c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +@@ -115593,7 +119439,7 @@ func (r *InterconnectsService) TestIamPermissions(project string, resource strin + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115601,21 +119447,21 @@ func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Inte + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115630,7 +119476,7 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -115639,19 +119485,20 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.interconnects.testIamPermissions" call. ++// Do executes the "compute.interconnectAttachments.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115683,11 +119530,12 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.interconnects.testIamPermissions", ++ // "id": "compute.interconnectAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", ++ // "region", + // "resource" + // ], + // "parameters": { +@@ -115698,15 +119546,22 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", ++ // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -115722,37 +119577,35 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.licenseCodes.get": ++// method id "compute.interconnectLocations.get": + +-type LicenseCodesGetCall struct { +- s *Service +- project string +- licenseCode string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type InterconnectLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ 
string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Return a specified license code. License codes are mirrored +-// across all projects that have permissions to read the License Code. +-// *Caution* This resource is intended for use only by third-party +-// partners who are creating Cloud Marketplace images. ++// Get: Returns the details for the specified interconnect location. ++// Gets a list of available interconnect locations by making a list() ++// request. + // +-// - licenseCode: Number corresponding to the License code resource to +-// return. +-// - project: Project ID for this request. +-func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { +- c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - interconnectLocation: Name of the interconnect location to return. ++// - project: Project ID for this request. ++func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { ++ c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.licenseCode = licenseCode ++ c.interconnectLocation = interconnectLocation + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -115762,7 +119615,7 @@ func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -115770,21 +119623,21 @@ func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { ++func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicenseCodesGetCall) Header() http.Header { ++func (c *InterconnectLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -115797,7 +119650,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations/{interconnectLocation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -115805,20 +119658,20 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "licenseCode": c.licenseCode, ++ "project": c.project, ++ "interconnectLocation": c.interconnectLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenseCodes.get" call. +-// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *LicenseCode.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { ++// Do executes the "compute.interconnectLocations.get" call. ++// Exactly one of *InterconnectLocation or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InterconnectLocation.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115837,7 +119690,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &LicenseCode{ ++ ret := &InterconnectLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -115849,19 +119702,19 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + } + return ret, nil + // { +- // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + // "httpMethod": "GET", +- // "id": "compute.licenseCodes.get", ++ // "id": "compute.interconnectLocations.get", + // "parameterOrder": [ + // "project", +- // "licenseCode" ++ // "interconnectLocation" + // ], + // "parameters": { +- // "licenseCode": { +- // "description": "Number corresponding to the License code resource to return.", ++ // "interconnectLocation": { ++ // "description": "Name of the interconnect location to return.", + // "location": "path", +- // "pattern": "[0-9]{0,61}?", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -115873,9 +119726,9 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { +- // "$ref": "LicenseCode" ++ // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -115886,94 +119739,382 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er + + } + +-// method id "compute.licenseCodes.testIamPermissions": ++// method id "compute.interconnectLocations.list": + +-type LicenseCodesTestIamPermissionsCall struct { +- s *Service +- project string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. *Caution* This resource is intended for use only +-// by third-party partners who are creating Cloud Marketplace images. ++// List: Retrieves the list of interconnect locations available to the ++// specified project. + // + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { +- c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { ++ c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. 
The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. 
++func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { ++func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { ++func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectLocations") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } +- reqHeaders.Set("Content-Type", "application/json") ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectLocations.list" call. ++// Exactly one of *InterconnectLocationList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectLocationList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectLocations", ++ // "response": { ++ // "$ref": "InterconnectLocationList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.interconnectRemoteLocations.get": ++ ++type InterconnectRemoteLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectRemoteLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the details for the specified interconnect remote ++// location. Gets a list of available interconnect remote locations by ++// making a list() request. ++// ++// - interconnectRemoteLocation: Name of the interconnect remote ++// location to return. ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { ++ c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnectRemoteLocation = interconnectRemoteLocation ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "resource": c.resource, ++ "project": c.project, ++ "interconnectRemoteLocation": c.interconnectRemoteLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenseCodes.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.interconnectRemoteLocations.get" call. ++// Exactly one of *InterconnectRemoteLocation or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectRemoteLocation.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -115992,7 +120133,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &InterconnectRemoteLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116004,36 +120145,292 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.licenseCodes.testIamPermissions", ++ // "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.get", + // "parameterOrder": [ + // "project", +- // "resource" ++ // "interconnectRemoteLocation" + // ], + // "parameters": { ++ // "interconnectRemoteLocation": { ++ // "description": "Name of the interconnect remote location to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnectRemoteLocations.list": ++ ++type InterconnectRemoteLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of interconnect remote locations available ++// to the specified project. ++// ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { ++ c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. 
You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. 
++func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectRemoteLocationsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.list" call. ++// Exactly one of *InterconnectRemoteLocationList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations", + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "InterconnectRemoteLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116044,27 +120441,46 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.licenses.delete": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type LicensesDeleteCall struct { +- s *Service +- project string +- license string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.interconnects.delete": ++ ++type InterconnectsDeleteCall struct { ++ s *Service ++ project string ++ interconnect string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified license. *Caution* This resource is +-// intended for use only by third-party partners who are creating Cloud +-// Marketplace images. ++// Delete: Deletes the specified Interconnect. + // +-// - license: Name of the license resource to delete. ++// - interconnect: Name of the interconnect to delete. + // - project: Project ID for this request. +-func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { +- c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { ++ c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + +@@ -116079,7 +120495,7 @@ func (r *LicensesService) Delete(project string, license string) *LicensesDelete + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -116087,7 +120503,7 @@ func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116095,21 +120511,21 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { ++func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesDeleteCall) Header() http.Header { ++func (c *InterconnectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116119,7 +120535,7 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -116127,20 +120543,20 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "license": c.license, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.delete" call. ++// Do executes the "compute.interconnects.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116171,17 +120587,17 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses/{license}", ++ // "description": "Deletes the specified Interconnect.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "DELETE", +- // "id": "compute.licenses.delete", ++ // "id": "compute.interconnects.delete", + // "parameterOrder": [ + // "project", +- // "license" ++ // "interconnect" + // ], + // "parameters": { +- // "license": { +- // "description": "Name of the license resource to delete.", ++ // "interconnect": { ++ // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -116200,7 +120616,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{license}", ++ // "path": "projects/{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Operation" + // }, +@@ -116212,35 +120628,34 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error + + } + +-// method id "compute.licenses.get": ++// method id "compute.interconnects.get": + +-type LicensesGetCall struct { ++type InterconnectsGetCall struct { + s *Service + project string +- license string ++ interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified License resource. *Caution* This resource +-// is intended for use only by third-party partners who are creating +-// Cloud Marketplace images. ++// Get: Returns the specified Interconnect. Get a list of available ++// Interconnects by making a list() request. + // +-// - license: Name of the License resource to return. ++// - interconnect: Name of the interconnect to return. + // - project: Project ID for this request. +-func (r *LicensesService) Get(project string, license string) *LicensesGetCall { +- c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { ++ c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { ++func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116250,7 +120665,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { ++func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116258,21 +120673,21 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { ++func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesGetCall) Header() http.Header { ++func (c *InterconnectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116285,7 +120700,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116293,20 +120708,20 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "license": c.license, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.get" call. +-// Exactly one of *License or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *License.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { ++// Do executes the "compute.interconnects.get" call. ++// Exactly one of *Interconnect or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Interconnect.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116325,7 +120740,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &License{ ++ ret := &Interconnect{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116337,17 +120752,17 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + } + return ret, nil + // { +- // "description": "Returns the specified License resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses/{license}", ++ // "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", + // "httpMethod": "GET", +- // "id": "compute.licenses.get", ++ // "id": "compute.interconnects.get", + // "parameterOrder": [ + // "project", +- // "license" ++ // "interconnect" + // ], + // "parameters": { +- // "license": { +- // "description": "Name of the License resource to return.", ++ // "interconnect": { ++ // "description": "Name of the interconnect to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -116361,9 +120776,9 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{license}", ++ // "path": "projects/{project}/global/interconnects/{interconnect}", + // "response": { +- // "$ref": "License" ++ // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116374,43 +120789,34 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + + } + +-// method id "compute.licenses.getIamPolicy": ++// method id "compute.interconnects.getDiagnostics": + +-type LicensesGetIamPolicyCall struct { ++type InterconnectsGetDiagnosticsCall struct { + s *Service + project string +- resource string ++ interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. *Caution* This resource +-// is intended for use only by third-party partners who are creating +-// Cloud Marketplace images. ++// GetDiagnostics: Returns the interconnectDiagnostics for the specified ++// Interconnect. + // ++// - interconnect: Name of the interconnect resource to query. + // - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { +- c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { ++ c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.resource = resource +- return c +-} +- +-// OptionsRequestedPolicyVersion sets the optional parameter +-// "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { +- c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ c.interconnect = interconnect + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116420,7 +120826,7 @@ func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamP + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116428,21 +120834,21 @@ func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIam + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { ++func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesGetIamPolicyCall) Header() http.Header { ++func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116455,7 +120861,7 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getDiagnostics") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116463,20 +120869,21 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "resource": c.resource, ++ "project": c.project, ++ "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.getIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.interconnects.getDiagnostics" call. 
++// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116495,7 +120902,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &InterconnectsGetDiagnosticsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116507,39 +120914,33 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "httpMethod": "GET", +- // "id": "compute.licenses.getIamPolicy", ++ // "id": "compute.interconnects.getDiagnostics", + // "parameterOrder": [ + // "project", +- // "resource" ++ // "interconnect" + // ], + // "parameters": { +- // "optionsRequestedPolicyVersion": { +- // "description": "Requested IAM Policy version.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, +- // "project": { +- // "description": "Project ID for this request.", ++ // "interconnect": { ++ // "description": "Name of the interconnect resource to query.", + // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", ++ // "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + // "response": { +- // "$ref": "Policy" ++ // "$ref": "InterconnectsGetDiagnosticsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116550,26 +120951,25 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + + } + +-// method id "compute.licenses.insert": ++// method id 
"compute.interconnects.insert": + +-type LicensesInsertCall struct { +- s *Service +- project string +- license *License +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type InterconnectsInsertCall struct { ++ s *Service ++ project string ++ interconnect *Interconnect ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Create a License resource in the specified project. *Caution* +-// This resource is intended for use only by third-party partners who +-// are creating Cloud Marketplace images. ++// Insert: Creates an Interconnect in the specified project using the ++// data included in the request. + // + // - project: Project ID for this request. +-func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { +- c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { ++ c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.license = license ++ c.interconnect = interconnect + return c + } + +@@ -116584,7 +120984,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -116592,7 +120992,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116600,21 +121000,21 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { ++func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesInsertCall) Header() http.Header { ++func (c *InterconnectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116622,14 +121022,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -116642,14 +121042,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.insert" call. ++// Do executes the "compute.interconnects.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116680,10 +121080,10 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error + } + return ret, nil + // { +- // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses", ++ // "description": "Creates an Interconnect in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/global/interconnects", + // "httpMethod": "POST", +- // "id": "compute.licenses.insert", ++ // "id": "compute.interconnects.insert", + // "parameterOrder": [ + // "project" + // ], +@@ -116701,27 +121101,24 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses", ++ // "path": "projects/{project}/global/interconnects", + // "request": { +- // "$ref": "License" ++ // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/devstorage.full_control", +- // "https://www.googleapis.com/auth/devstorage.read_only", +- // "https://www.googleapis.com/auth/devstorage.read_write" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.licenses.list": ++// method id "compute.interconnects.list": + +-type LicensesListCall struct { ++type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -116730,18 +121127,12 @@ type LicensesListCall struct { + header_ http.Header + } + +-// List: Retrieves the list of licenses available in the specified +-// project. This method does not get any licenses that belong to other +-// projects, including licenses attached to publicly-available images, +-// like Debian 9. If you want to get a list of publicly-available +-// licenses, use this method to make a request to the respective image +-// project, such as debian-cloud or windows-cloud. *Caution* This +-// resource is intended for use only by third-party partners who are +-// creating Cloud Marketplace images. ++// List: Retrieves the list of Interconnects available to the specified ++// project. + // + // - project: Project ID for this request. +-func (r *LicensesService) List(project string) *LicensesListCall { +- c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) List(project string) *InterconnectsListCall { ++ c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -116781,7 +121172,7 @@ func (r *LicensesService) List(project string) *LicensesListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *LicensesListCall) Filter(filter string) *LicensesListCall { ++func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -116792,7 +121183,7 @@ func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. 
(Default: `500`) +-func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { ++func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -116806,7 +121197,7 @@ func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { ++func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -116814,7 +121205,7 @@ func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { ++func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -116823,7 +121214,7 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { ++func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -116831,7 +121222,7 @@ func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *Lice + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { ++func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -116841,7 +121232,7 @@ func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { ++func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -116849,21 +121240,21 @@ func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { ++func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *LicensesListCall) Header() http.Header { ++func (c *InterconnectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -116876,7 +121267,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -116889,14 +121280,14 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.list" call. +-// Exactly one of *LicensesListResponse or error will be non-nil. Any ++// Do executes the "compute.interconnects.list" call. ++// Exactly one of *InterconnectList or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *LicensesListResponse.ServerResponse.Header or (if a response was ++// *InterconnectList.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { ++func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -116915,7 +121306,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &LicensesListResponse{ ++ ret := &InterconnectList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -116927,10 +121318,10 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + } + return ret, nil + // { +- // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", +- // "flatPath": "projects/{project}/global/licenses", ++ // "description": "Retrieves the list of Interconnects available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnects", + // "httpMethod": "GET", +- // "id": "compute.licenses.list", ++ // "id": "compute.interconnects.list", + // "parameterOrder": [ + // "project" + // ], +@@ -116971,9 +121362,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/licenses", ++ // "path": "projects/{project}/global/interconnects", + // "response": { +- // "$ref": "LicensesListResponse" ++ // "$ref": "InterconnectList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -116987,7 +121378,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { ++func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -117005,37 +121396,213 @@ func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListRespon + } + } + +-// method id "compute.licenses.setIamPolicy": ++// method id "compute.interconnects.patch": + +-type LicensesSetIamPolicyCall struct { ++type InterconnectsPatchCall struct { ++ s *Service ++ project string ++ interconnect string ++ interconnect2 *Interconnect ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Patch: Updates the specified Interconnect with the data included in ++// the request. This method supports PATCH semantics and uses the JSON ++// merge patch format and processing rules. ++// ++// - interconnect: Name of the interconnect to update. ++// - project: Project ID for this request. ++func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { ++ c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnect = interconnect ++ c.interconnect2 = interconnect2 ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectsPatchCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "interconnect": c.interconnect, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnects.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates the specified Interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", ++ // "flatPath": "projects/{project}/global/interconnects/{interconnect}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.interconnects.patch", ++ // "parameterOrder": [ ++ // "project", ++ // "interconnect" ++ // ], ++ // "parameters": { ++ // "interconnect": { ++ // "description": "Name of the interconnect to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnects/{interconnect}", ++ // "request": { ++ // "$ref": "Interconnect" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnects.setLabels": ++ ++type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string +- globalsetpolicyrequest *GlobalSetPolicyRequest ++ globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. *Caution* This resource is +-// intended for use only by third-party partners who are creating Cloud +-// Marketplace images. ++// SetLabels: Sets the labels on an Interconnect. To learn more about ++// labels, read the Labeling Resources documentation. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { +- c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { ++ c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource +- c.globalsetpolicyrequest = globalsetpolicyrequest ++ c.globalsetlabelsrequest = globalsetlabelsrequest + return c + } + + // Fields allows partial responses to be retrieved. 
See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { ++func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117043,21 +121610,21 @@ func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamP + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { ++func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesSetIamPolicyCall) Header() http.Header { ++func (c *InterconnectsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117065,14 +121632,14 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117086,14 +121653,14 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.interconnects.setLabels" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117112,7 +121679,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -117124,10 +121691,10 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.licenses.setIamPolicy", ++ // "id": "compute.interconnects.setLabels", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117148,12 +121715,12 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", ++ // "path": "projects/{project}/global/interconnects/{resource}/setLabels", + // "request": { +- // "$ref": "GlobalSetPolicyRequest" ++ // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { +- // "$ref": "Policy" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -117163,9 +121730,9 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er + + } + +-// method id "compute.licenses.testIamPermissions": ++// method id "compute.interconnects.testIamPermissions": + +-type LicensesTestIamPermissionsCall struct { ++type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string +@@ -117176,13 +121743,12 @@ type LicensesTestIamPermissionsCall struct { + } + + // TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. *Caution* This resource is intended for use only +-// by third-party partners who are creating Cloud Marketplace images. ++// specified resource. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. 
+-func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { +- c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { ++ c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest +@@ -117192,7 +121758,7 @@ func (r *LicensesService) TestIamPermissions(project string, resource string, te + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { ++func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117200,21 +121766,21 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { ++func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *LicensesTestIamPermissionsCall) Header() http.Header { ++func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117229,7 +121795,7 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117243,14 +121809,14 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.licenses.testIamPermissions" call. ++// Do executes the "compute.interconnects.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. 
Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117281,10 +121847,10 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", +- // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.licenses.testIamPermissions", ++ // "id": "compute.interconnects.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117305,7 +121871,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", ++ // "path": "projects/{project}/global/interconnects/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -117321,26 +121887,349 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test + + } + +-// method id "compute.machineImages.delete": ++// method id "compute.licenseCodes.get": + +-type MachineImagesDeleteCall struct { ++type LicenseCodesGetCall struct { + s *Service + project string +- machineImage string ++ licenseCode string + urlParams_ gensupport.URLParams ++ ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Delete: Deletes the specified machine image. Deleting a machine image +-// is permanent and cannot be undone. ++// Get: Return a specified license code. License codes are mirrored ++// across all projects that have permissions to read the License Code. ++// *Caution* This resource is intended for use only by third-party ++// partners who are creating Cloud Marketplace images. ++// ++// - licenseCode: Number corresponding to the License code resource to ++// return. ++// - project: Project ID for this request. ++func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { ++ c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.licenseCode = licenseCode ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. 
Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *LicenseCodesGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{licenseCode}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "licenseCode": c.licenseCode, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.licenseCodes.get" call. ++// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *LicenseCode.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &LicenseCode{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "httpMethod": "GET", ++ // "id": "compute.licenseCodes.get", ++ // "parameterOrder": [ ++ // "project", ++ // "licenseCode" ++ // ], ++ // "parameters": { ++ // "licenseCode": { ++ // "description": "Number corresponding to the License code resource to return.", ++ // "location": "path", ++ // "pattern": "[0-9]{0,61}?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/licenseCodes/{licenseCode}", ++ // "response": { ++ // "$ref": "LicenseCode" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.licenseCodes.testIamPermissions": ++ ++type LicenseCodesTestIamPermissionsCall struct { ++ s *Service ++ project string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. *Caution* This resource is intended for use only ++// by third-party partners who are creating Cloud Marketplace images. + // +-// - machineImage: The name of the machine image to delete. + // - project: Project ID for this request. +-func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { +- c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { ++ c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineImage = machineImage ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenseCodes/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.licenseCodes.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.licenseCodes.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/licenseCodes/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.licenses.delete": ++ ++type LicensesDeleteCall struct { ++ s *Service ++ project string ++ license string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Delete: Deletes the specified license. *Caution* This resource is ++// intended for use only by third-party partners who are creating Cloud ++// Marketplace images. ++// ++// - license: Name of the license resource to delete. ++// - project: Project ID for this request. ++func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { ++ c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.license = license + return c + } + +@@ -117355,7 +122244,7 @@ func (r *MachineImagesService) Delete(project string, machineImage string) *Mach + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -117363,7 +122252,7 @@ func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDele + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117371,21 +122260,21 @@ func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { ++func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesDeleteCall) Header() http.Header { ++func (c *LicensesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117395,7 +122284,7 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -117403,20 +122292,20 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "machineImage": c.machineImage, ++ "project": c.project, ++ "license": c.license, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.delete" call. ++// Do executes the "compute.licenses.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117447,17 +122336,17 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", +- // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "description": "Deletes the specified license. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. 
", ++ // "flatPath": "projects/{project}/global/licenses/{license}", + // "httpMethod": "DELETE", +- // "id": "compute.machineImages.delete", ++ // "id": "compute.licenses.delete", + // "parameterOrder": [ + // "project", +- // "machineImage" ++ // "license" + // ], + // "parameters": { +- // "machineImage": { +- // "description": "The name of the machine image to delete.", ++ // "license": { ++ // "description": "Name of the license resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -117476,7 +122365,7 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "path": "projects/{project}/global/licenses/{license}", + // "response": { + // "$ref": "Operation" + // }, +@@ -117488,33 +122377,35 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + +-// method id "compute.machineImages.get": ++// method id "compute.licenses.get": + +-type MachineImagesGetCall struct { ++type LicensesGetCall struct { + s *Service + project string +- machineImage string ++ license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified machine image. ++// Get: Returns the specified License resource. *Caution* This resource ++// is intended for use only by third-party partners who are creating ++// Cloud Marketplace images. + // +-// - machineImage: The name of the machine image. ++// - license: Name of the License resource to return. + // - project: Project ID for this request. +-func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { +- c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) Get(project string, license string) *LicensesGetCall { ++ c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineImage = machineImage ++ c.license = license + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { ++func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117524,7 +122415,7 @@ func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCal + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { ++func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -117532,21 +122423,21 @@ func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCa + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { ++func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesGetCall) Header() http.Header { ++func (c *LicensesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117559,7 +122450,7 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -117567,20 +122458,20 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "machineImage": c.machineImage, ++ "project": c.project, ++ "license": c.license, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.get" call. +-// Exactly one of *MachineImage or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *MachineImage.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { ++// Do executes the "compute.licenses.get" call. ++// Exactly one of *License or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *License.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117599,7 +122490,7 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineImage{ ++ ret := &License{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -117611,17 +122502,17 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + } + return ret, nil + // { +- // "description": "Returns the specified machine image.", +- // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "description": "Returns the specified License resource. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{license}", + // "httpMethod": "GET", +- // "id": "compute.machineImages.get", ++ // "id": "compute.licenses.get", + // "parameterOrder": [ + // "project", +- // "machineImage" ++ // "license" + // ], + // "parameters": { +- // "machineImage": { +- // "description": "The name of the machine image.", ++ // "license": { ++ // "description": "Name of the License resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -117635,9 +122526,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "path": "projects/{project}/global/licenses/{license}", + // "response": { +- // "$ref": "MachineImage" ++ // "$ref": "License" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -117648,9 +122539,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, + + } + +-// method id "compute.machineImages.getIamPolicy": ++// method id "compute.licenses.getIamPolicy": + +-type MachineImagesGetIamPolicyCall struct { ++type LicensesGetIamPolicyCall struct { + s *Service + project string + resource string +@@ -117661,12 +122552,14 @@ type MachineImagesGetIamPolicyCall struct { + } + + // GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. ++// empty if no such policy or resource exists. *Caution* This resource ++// is intended for use only by third-party partners who are creating ++// Cloud Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { +- c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { ++ c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +@@ -117674,7 +122567,7 @@ func (r *MachineImagesService) GetIamPolicy(project string, resource string) *Ma + + // OptionsRequestedPolicyVersion sets the optional parameter + // "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *LicensesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } +@@ -117682,7 +122575,7 @@ func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsReq + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117692,7 +122585,7 @@ func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -117700,21 +122593,21 @@ func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineIm + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { ++func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesGetIamPolicyCall) Header() http.Header { ++func (c *LicensesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117727,7 +122620,7 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -117741,14 +122634,14 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.getIamPolicy" call. ++// Do executes the "compute.licenses.getIamPolicy" call. + // Exactly one of *Policy or error will be non-nil. Any non-2xx status + // code is an error. Response headers are in either + // *Policy.ServerResponse.Header or (if a response was returned at all) + // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to + // check whether the returned error was because http.StatusNotModified + // was returned. +-func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117779,10 +122672,10 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/getIamPolicy", + // "httpMethod": "GET", +- // "id": "compute.machineImages.getIamPolicy", ++ // "id": "compute.licenses.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" +@@ -117809,7 +122702,7 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", ++ // "path": "projects/{project}/global/licenses/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, +@@ -117822,28 +122715,26 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + + } + +-// method id "compute.machineImages.insert": ++// method id "compute.licenses.insert": + +-type MachineImagesInsertCall struct { +- s *Service +- project string +- machineimage *MachineImage +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type LicensesInsertCall struct { ++ s *Service ++ project string ++ license *License ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a machine image in the specified project using the +-// data that is included in the request. If you are creating a new +-// machine image to update an existing instance, your new machine image +-// should use the same network or, if applicable, the same subnetwork as +-// the original instance. ++// Insert: Create a License resource in the specified project. *Caution* ++// This resource is intended for use only by third-party partners who ++// are creating Cloud Marketplace images. + // + // - project: Project ID for this request. +-func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { +- c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { ++ c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.machineimage = machineimage ++ c.license = license + return c + } + +@@ -117858,23 +122749,15 @@ func (r *MachineImagesService) Insert(project string, machineimage *MachineImage + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// SourceInstance sets the optional parameter "sourceInstance": +-// Required. 
Source instance that is used to create the machine image +-// from. +-func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { +- c.urlParams_.Set("sourceInstance", sourceInstance) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -117882,21 +122765,21 @@ func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesIns + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { ++func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesInsertCall) Header() http.Header { ++func (c *LicensesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -117904,14 +122787,14 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -117924,14 +122807,14 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.insert" call. ++// Do executes the "compute.licenses.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -117962,10 +122845,10 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + } + return ret, nil + // { +- // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", +- // "flatPath": "projects/{project}/global/machineImages", ++ // "description": "Create a License resource in the specified project. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses", + // "httpMethod": "POST", +- // "id": "compute.machineImages.insert", ++ // "id": "compute.licenses.insert", + // "parameterOrder": [ + // "project" + // ], +@@ -117981,31 +122864,29 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" +- // }, +- // "sourceInstance": { +- // "description": "Required. Source instance that is used to create the machine image from.", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages", ++ // "path": "projects/{project}/global/licenses", + // "request": { +- // "$ref": "MachineImage" ++ // "$ref": "License" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/devstorage.full_control", ++ // "https://www.googleapis.com/auth/devstorage.read_only", ++ // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + + } + +-// method id "compute.machineImages.list": ++// method id "compute.licenses.list": + +-type MachineImagesListCall struct { ++type LicensesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -118014,12 +122895,18 @@ type MachineImagesListCall struct { + header_ http.Header + } + +-// List: Retrieves a list of machine images that are contained within +-// the specified project. ++// List: Retrieves the list of licenses available in the specified ++// project. This method does not get any licenses that belong to other ++// projects, including licenses attached to publicly-available images, ++// like Debian 9. If you want to get a list of publicly-available ++// licenses, use this method to make a request to the respective image ++// project, such as debian-cloud or windows-cloud. 
*Caution* This ++// resource is intended for use only by third-party partners who are ++// creating Cloud Marketplace images. + // + // - project: Project ID for this request. +-func (r *MachineImagesService) List(project string) *MachineImagesListCall { +- c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) List(project string) *LicensesListCall { ++ c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -118059,7 +122946,7 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { ++func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -118070,7 +122957,7 @@ func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { ++func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -118084,7 +122971,7 @@ func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListC + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { ++func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -118092,7 +122979,7 @@ func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { ++func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -118101,7 +122988,7 @@ func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCa + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { ++func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -118109,7 +122996,7 @@ func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { ++func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118119,7 +123006,7 @@ func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListC + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { ++func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118127,21 +123014,21 @@ func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesList + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { ++func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesListCall) Header() http.Header { ++func (c *LicensesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118154,7 +123041,7 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118167,14 +123054,14 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.list" call. +-// Exactly one of *MachineImageList or error will be non-nil. Any ++// Do executes the "compute.licenses.list" call. ++// Exactly one of *LicensesListResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *MachineImageList.ServerResponse.Header or (if a response was ++// *LicensesListResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { ++func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118193,7 +123080,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineImageList{ ++ ret := &LicensesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -118205,10 +123092,10 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + } + return ret, nil + // { +- // "description": "Retrieves a list of machine images that are contained within the specified project.", +- // "flatPath": "projects/{project}/global/machineImages", ++ // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses", + // "httpMethod": "GET", +- // "id": "compute.machineImages.list", ++ // "id": "compute.licenses.list", + // "parameterOrder": [ + // "project" + // ], +@@ -118249,9 +123136,9 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/global/machineImages", ++ // "path": "projects/{project}/global/licenses", + // "response": { +- // "$ref": "MachineImageList" ++ // "$ref": "LicensesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -118265,7 +123152,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { ++func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -118283,9 +123170,9 @@ func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageL + } + } + +-// method id "compute.machineImages.setIamPolicy": ++// method id "compute.licenses.setIamPolicy": + +-type MachineImagesSetIamPolicyCall struct { ++type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string +@@ -118296,12 +123183,14 @@ type MachineImagesSetIamPolicyCall struct { + } + + // SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. ++// resource. Replaces any existing policy. *Caution* This resource is ++// intended for use only by third-party partners who are creating Cloud ++// Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. 
+-func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { +- c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { ++ c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest +@@ -118311,7 +123200,7 @@ func (r *MachineImagesService) SetIamPolicy(project string, resource string, glo + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { ++func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118319,21 +123208,21 @@ func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { ++func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesSetIamPolicyCall) Header() http.Header { ++func (c *LicensesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118348,7 +123237,7 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -118362,14 +123251,14 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.setIamPolicy" call. ++// Do executes the "compute.licenses.setIamPolicy" call. + // Exactly one of *Policy or error will be non-nil. Any non-2xx status + // code is an error. Response headers are in either + // *Policy.ServerResponse.Header or (if a response was returned at all) + // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to + // check whether the returned error was because http.StatusNotModified + // was returned. 
+-func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118400,10 +123289,10 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + } + return ret, nil + // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.machineImages.setIamPolicy", ++ // "id": "compute.licenses.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" +@@ -118424,7 +123313,7 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "path": "projects/{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, +@@ -118439,9 +123328,9 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic + + } + +-// method id "compute.machineImages.testIamPermissions": ++// method id "compute.licenses.testIamPermissions": + +-type MachineImagesTestIamPermissionsCall struct { ++type LicensesTestIamPermissionsCall struct { + s *Service + project string + resource string +@@ -118452,12 +123341,13 @@ type MachineImagesTestIamPermissionsCall struct { + } + + // TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// specified resource. *Caution* This resource is intended for use only ++// by third-party partners who are creating Cloud Marketplace images. + // + // - project: Project ID for this request. + // - resource: Name or id of the resource for this request. +-func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { +- c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { ++ c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest +@@ -118467,7 +123357,7 @@ func (r *MachineImagesService) TestIamPermissions(project string, resource strin + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { ++func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118475,21 +123365,21 @@ func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Mach + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { ++func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { ++func (c *LicensesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118504,7 +123394,7 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/licenses/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -118518,14 +123408,14 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineImages.testIamPermissions" call. ++// Do executes the "compute.licenses.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118556,10 +123446,10 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "description": "Returns permissions that a caller has on the specified resource. *Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", ++ // "flatPath": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.machineImages.testIamPermissions", ++ // "id": "compute.licenses.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" +@@ -118580,7 +123470,7 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "path": "projects/{project}/global/licenses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -118596,125 +123486,200 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( + + } + +-// method id "compute.machineTypes.aggregatedList": ++// method id "compute.machineImages.delete": + +-type MachineTypesAggregatedListCall struct { ++type MachineImagesDeleteCall struct { + s *Service + project string ++ machineImage string + urlParams_ gensupport.URLParams +- ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// AggregatedList: Retrieves an aggregated list of machine types. ++// Delete: Deletes the specified machine image. Deleting a machine image ++// is permanent and cannot be undone. + // ++// - machineImage: The name of the machine image to delete. + // - project: Project ID for this request. +-func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { +- c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { ++ c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.machineImage = machineImage + return c + } + +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. 
For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("filter", filter) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. +-func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { ++ c.ctx_ = ctx + return c + } + +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("orderBy", orderBy) +- return c ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *MachineImagesDeleteCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("pageToken", pageToken) +- return c ++func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("DELETE", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "machineImage": c.machineImage, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. 
+-func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// Do executes the "compute.machineImages.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", ++ // "flatPath": "projects/{project}/global/machineImages/{machineImage}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.machineImages.delete", ++ // "parameterOrder": [ ++ // "project", ++ // "machineImage" ++ // ], ++ // "parameters": { ++ // "machineImage": { ++ // "description": "The name of the machine image to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/machineImages/{machineImage}", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.machineImages.get": ++ ++type MachineImagesGetCall struct { ++ s *Service ++ project string ++ machineImage string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the specified machine image. ++// ++// - machineImage: The name of the machine image. ++// - project: Project ID for this request. ++func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { ++ c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.machineImage = machineImage + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118724,7 +123689,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118732,21 +123697,21 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { ++func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *MachineTypesAggregatedListCall) Header() http.Header { ++func (c *MachineImagesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118759,7 +123724,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{machineImage}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118767,19 +123732,20 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, ++ "project": c.project, ++ "machineImage": c.machineImage, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.aggregatedList" call. +-// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { ++// Do executes the "compute.machineImages.get" call. ++// Exactly one of *MachineImage or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *MachineImage.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -118798,7 +123764,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineTypeAggregatedList{ ++ ret := &MachineImage{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -118810,40 +123776,20 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + } + return ret, nil + // { +- // "description": "Retrieves an aggregated list of machine types.", +- // "flatPath": "projects/{project}/aggregated/machineTypes", ++ // "description": "Returns the specified machine image.", ++ // "flatPath": "projects/{project}/global/machineImages/{machineImage}", + // "httpMethod": "GET", +- // "id": "compute.machineTypes.aggregatedList", ++ // "id": "compute.machineImages.get", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "machineImage" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", ++ // "machineImage": { ++ // "description": "The name of the machine image.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" + // }, + // "project": { +@@ -118852,16 +123798,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" +- // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", +- // "location": "query", +- // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/machineTypes", ++ // "path": "projects/{project}/global/machineImages/{machineImage}", + // "response": { +- // "$ref": "MachineTypeAggregatedList" ++ // "$ref": "MachineImage" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -118872,57 +123813,41 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. 
+-func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.machineTypes.get": ++// method id "compute.machineImages.getIamPolicy": + +-type MachineTypesGetCall struct { ++type MachineImagesGetIamPolicyCall struct { + s *Service + project string +- zone string +- machineType string ++ resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// Get: Returns the specified machine type. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // +-// - machineType: Name of the machine type to return. + // - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { +- c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { ++ c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.machineType = machineType ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *MachineImagesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *MachineImagesGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -118932,7 +123857,7 @@ func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -118940,21 +123865,21 @@ func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { ++func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineTypesGetCall) Header() http.Header { ++func (c *MachineImagesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -118967,7 +123892,7 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -118975,21 +123900,20 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "machineType": c.machineType, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.get" call. +-// Exactly one of *MachineType or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *MachineType.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { ++// Do executes the "compute.machineImages.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119008,7 +123932,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineType{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119020,22 +123944,20 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + } + return ret, nil + // { +- // "description": "Returns the specified machine type.", +- // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "httpMethod": "GET", +- // "id": "compute.machineTypes.get", ++ // "id": "compute.machineImages.getIamPolicy", + // "parameterOrder": [ + // "project", +- // "zone", +- // "machineType" ++ // "resource" + // ], + // "parameters": { +- // "machineType": { +- // "description": "Name of the machine type to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", +@@ -119044,17 +123966,17 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone for this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "path": "projects/{project}/global/machineImages/{resource}/getIamPolicy", + // "response": { +- // "$ref": "MachineType" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119065,182 +123987,116 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er + + } + +-// method id "compute.machineTypes.list": ++// method id "compute.machineImages.insert": + +-type MachineTypesListCall struct { ++type MachineImagesInsertCall struct { + s *Service + project string +- zone string ++ machineimage *MachineImage + urlParams_ gensupport.URLParams +- ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Retrieves a list of machine types available to the specified +-// project. ++// Insert: Creates a machine image in the specified project using the ++// data that is included in the request. If you are creating a new ++// machine image to update an existing instance, your new machine image ++// should use the same network or, if applicable, the same subnetwork as ++// the original instance. + // + // - project: Project ID for this request. +-// - zone: The name of the zone for this request. 
+-func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { +- c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { ++ c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- return c +-} +- +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. 
(Default: `500`) +-func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +- return c +-} +- +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { +- c.urlParams_.Set("orderBy", orderBy) ++ c.machineimage = machineimage + return c + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { +- c.urlParams_.Set("pageToken", pageToken) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// SourceInstance sets the optional parameter "sourceInstance": ++// Required. Source instance that is used to create the machine image ++// from. ++func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { ++ c.urlParams_.Set("sourceInstance", sourceInstance) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { ++func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. 
This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { ++func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *MachineTypesListCall) Header() http.Header { ++func (c *MachineImagesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.machineTypes.list" call. +-// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx ++// Do executes the "compute.machineImages.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *MachineTypeList.ServerResponse.Header or (if a response was returned +-// at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119259,7 +124115,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &MachineTypeList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119271,38 +124127,14 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + } + return ret, nil + // { +- // "description": "Retrieves a list of machine types available to the specified project.", +- // "flatPath": "projects/{project}/zones/{zone}/machineTypes", +- // "httpMethod": "GET", +- // "id": "compute.machineTypes.list", ++ // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", ++ // "flatPath": "projects/{project}/global/machineImages", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.insert", + // "parameterOrder": [ +- // "project", +- // "zone" ++ // "project" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119310,56 +124142,35 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, ++ // "sourceInstance": { ++ // "description": "Required. 
Source instance that is used to create the machine image from.", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/machineTypes", ++ // "path": "projects/{project}/global/machineImages", ++ // "request": { ++ // "$ref": "MachineImage" ++ // }, + // "response": { +- // "$ref": "MachineTypeList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.networkAttachments.aggregatedList": ++// method id "compute.machineImages.list": + +-type NetworkAttachmentsAggregatedListCall struct { ++type MachineImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -119368,12 +124179,12 @@ type NetworkAttachmentsAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves the list of all NetworkAttachment +-// resources, regional and global, available to the specified project. ++// List: Retrieves a list of machine images that are contained within ++// the specified project. + // + // - project: Project ID for this request. +-func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { +- c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineImagesService) List(project string) *MachineImagesListCall { ++ c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -119413,31 +124224,18 @@ func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttac + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { + c.urlParams_.Set("filter", filter) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. 
+-func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +- return c +-} +- + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -119451,7 +124249,7 @@ func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *Net + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -119459,7 +124257,7 @@ func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkA + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -119468,7 +124266,7 @@ func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *Netw + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -119476,7 +124274,7 @@ func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartia + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -119486,7 +124284,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *Net + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -119494,21 +124292,21 @@ func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *Ne + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { ++func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { ++func (c *MachineImagesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -119521,7 +124319,7 @@ func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Resp + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -119534,14 +124332,14 @@ func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Resp + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.aggregatedList" call. +-// Exactly one of *NetworkAttachmentAggregatedList or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if +-// a response was returned at all) in error.(*googleapi.Error).Header. +-// Use googleapi.IsNotModified to check whether the returned error was ++// Do executes the "compute.machineImages.list" call. ++// Exactly one of *MachineImageList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *MachineImageList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { ++func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119560,7 +124358,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachmentAggregatedList{ ++ ret := &MachineImageList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119572,10 +124370,10 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", +- // "flatPath": "projects/{project}/aggregated/networkAttachments", ++ // "description": "Retrieves a list of machine images that are contained within the specified project.", ++ // "flatPath": "projects/{project}/global/machineImages", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.aggregatedList", ++ // "id": "compute.machineImages.list", + // "parameterOrder": [ + // "project" + // ], +@@ -119585,11 +124383,6 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -119621,9 +124414,9 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkAttachments", ++ // "path": "projects/{project}/global/machineImages", + // "response": { +- // "$ref": "NetworkAttachmentAggregatedList" ++ // "$ref": "MachineImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119637,7 +124430,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. 
+-func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { ++func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -119655,53 +124448,35 @@ func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func + } + } + +-// method id "compute.networkAttachments.delete": ++// method id "compute.machineImages.setIamPolicy": + +-type NetworkAttachmentsDeleteCall struct { +- s *Service +- project string +- region string +- networkAttachment string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type MachineImagesSetIamPolicyCall struct { ++ s *Service ++ project string ++ resource string ++ globalsetpolicyrequest *GlobalSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified NetworkAttachment in the given scope ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - networkAttachment: Name of the NetworkAttachment resource to +-// delete. +-// - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { +- c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { ++ c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkAttachment = networkAttachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +-// MixerMutationRequestBuilder +-func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { +- c.urlParams_.Set("requestId", requestId) ++ c.resource = resource ++ c.globalsetpolicyrequest = globalsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { ++func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -119709,21 +124484,21 @@ func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAtta + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { ++func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsDeleteCall) Header() http.Header { ++func (c *MachineImagesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -119731,31 +124506,35 @@ func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, er + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkAttachment": c.networkAttachment, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.delete" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.machineImages.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119774,7 +124553,7 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119786,23 +124565,15 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + } + return ret, nil + // { +- // "description": "Deletes the specified NetworkAttachment in the given scope", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", +- // "httpMethod": "DELETE", +- // "id": "compute.networkAttachments.delete", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkAttachment" ++ // "resource" + // ], + // "parameters": { +- // "networkAttachment": { +- // "description": "Name of the NetworkAttachment resource to delete.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119810,22 +124581,20 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "path": "projects/{project}/global/machineImages/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "GlobalSetPolicyRequest" ++ // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -119835,105 +124604,93 @@ func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat + + } + +-// method id "compute.networkAttachments.get": ++// method id "compute.machineImages.testIamPermissions": + +-type NetworkAttachmentsGetCall struct { +- s *Service +- project string +- region string +- networkAttachment string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type MachineImagesTestIamPermissionsCall struct { ++ s *Service ++ project string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified NetworkAttachment resource in the given +-// scope. ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. + // +-// - networkAttachment: Name of the NetworkAttachment resource to +-// return. +-// - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { +- c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { ++ c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkAttachment = networkAttachment ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { ++func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { ++func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsGetCall) Header() http.Header { ++func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/machineImages/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkAttachment": c.networkAttachment, ++ "project": c.project, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.get" call. +-// Exactly one of *NetworkAttachment or error will be non-nil. Any ++// Do executes the "compute.machineImages.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *NetworkAttachment.ServerResponse.Header or (if a response was ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { ++func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -119952,7 +124709,7 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachment{ ++ ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -119964,23 +124721,15 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + } + return ret, nil + // { +- // "description": "Returns the specified NetworkAttachment resource in the given scope.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", +- // "httpMethod": "GET", +- // "id": "compute.networkAttachments.get", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.machineImages.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkAttachment" ++ // "resource" + // ], + // "parameters": { +- // "networkAttachment": { +- // "description": "Name of the NetworkAttachment resource to return.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -119988,17 +124737,20 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", ++ // "path": "projects/{project}/global/machineImages/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, + // "response": { +- // "$ref": "NetworkAttachment" ++ // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120009,46 +124761,127 @@ func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAt + + } + +-// method id "compute.networkAttachments.getIamPolicy": ++// method id "compute.machineTypes.aggregatedList": + +-type NetworkAttachmentsGetIamPolicyCall struct { ++type MachineTypesAggregatedListCall struct { + s *Service + project string +- region string +- resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// GetIamPolicy: Gets the access control policy for a resource. May be +-// empty if no such policy or resource exists. ++// AggregatedList: Retrieves an aggregated list of machine types. + // + // - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. 
+-func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { +- c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { ++ c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.resource = resource + return c + } + +-// OptionsRequestedPolicyVersion sets the optional parameter +-// "optionsRequestedPolicyVersion": Requested IAM Policy version. +-func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { +- c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("filter", filter) + return c + } + +-// Fields allows partial responses to be retrieved. 
See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *MachineTypesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c + } + + // IfNoneMatch sets the optional parameter which makes the operation +@@ -120056,7 +124889,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *Netwo + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { ++func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -120064,21 +124897,21 @@ func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *Netw + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { ++func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { ++func (c *MachineTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -120091,7 +124924,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Respon + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -120099,21 +124932,19 @@ func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.getIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++// Do executes the "compute.machineTypes.aggregatedList" call. 
++// Exactly one of *MachineTypeAggregatedList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *MachineTypeAggregatedList.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120132,7 +124963,7 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Policy{ ++ ret := &MachineTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120144,47 +124975,58 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "description": "Retrieves an aggregated list of machine types.", ++ // "flatPath": "projects/{project}/aggregated/machineTypes", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.getIamPolicy", ++ // "id": "compute.machineTypes.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" ++ // "project" + // ], + // "parameters": { +- // "optionsRequestedPolicyVersion": { +- // "description": "Requested IAM Policy version.", +- // "format": "int32", ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", + // "location": "query", ++ // "minimum": "0", + // "type": "integer" + // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", + // "type": "string" + // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", ++ // "project": { ++ // "description": "Project ID for this request.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "path": "projects/{project}/aggregated/machineTypes", + // "response": { +- // "$ref": "Policy" ++ // "$ref": "MachineTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120195,110 +125037,124 @@ func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkAttachments.insert": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type NetworkAttachmentsInsertCall struct { +- s *Service +- project string +- region string +- networkattachment *NetworkAttachment +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.machineTypes.get": ++ ++type MachineTypesGetCall struct { ++ s *Service ++ project string ++ zone string ++ machineType string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a NetworkAttachment in the specified project in the +-// given scope using the parameters that are included in the request. ++// Get: Returns the specified machine type. + // ++// - machineType: Name of the machine type to return. + // - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { +- c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { ++ c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region +- c.networkattachment = networkattachment +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +-// MixerMutationRequestBuilder +-func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { +- c.urlParams_.Set("requestId", requestId) ++ c.zone = zone ++ c.machineType = machineType + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { ++func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { ++func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkAttachmentsInsertCall) Header() http.Header { ++func (c *MachineTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes/{machineType}") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "zone": c.zone, ++ "machineType": c.machineType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.insert" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// Do executes the "compute.machineTypes.get" call. ++// Exactly one of *MachineType or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at ++// *MachineType.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120317,7 +125173,7 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &MachineType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120329,15 +125185,23 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + } + return ret, nil + // { +- // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.insert", ++ // "description": "Returns the specified machine type.", ++ // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", ++ // "httpMethod": "GET", ++ // "id": "compute.machineTypes.get", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone", ++ // "machineType" + // ], + // "parameters": { ++ // "machineType": { ++ // "description": "Name of the machine type to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -120345,54 +125209,48 @@ func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", ++ // "zone": { ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", +- // "location": "query", +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments", +- // "request": { +- // "$ref": "NetworkAttachment" +- // }, ++ // "path": "projects/{project}/zones/{zone}/machineTypes/{machineType}", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "MachineType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkAttachments.list": ++// method id "compute.machineTypes.list": + +-type NetworkAttachmentsListCall struct { ++type MachineTypesListCall struct { + s *Service + project string +- region string ++ zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// List: Lists the NetworkAttachments for a project in the given scope. ++// List: Retrieves a list of machine types available to the specified ++// project. + // + // - project: Project ID for this request. +-// - region: Name of the region of this request. +-func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { +- c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - zone: The name of the zone for this request. ++func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { ++ c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.region = region ++ c.zone = zone + return c + } + +@@ -120431,7 +125289,7 @@ func (r *NetworkAttachmentsService) List(project string, region string) *Network + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -120442,7 +125300,7 @@ func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsLi + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -120456,7 +125314,7 @@ func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttach + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. 
+-func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -120464,7 +125322,7 @@ func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachments + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -120473,7 +125331,7 @@ func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachm + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -120481,7 +125339,7 @@ func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess b + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -120491,7 +125349,7 @@ func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttach + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -120499,21 +125357,21 @@ func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttac + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { ++func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkAttachmentsListCall) Header() http.Header { ++func (c *MachineTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -120526,7 +125384,7 @@ func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, erro + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -120535,19 +125393,19 @@ func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, erro + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "region": c.region, ++ "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkAttachments.list" call. +-// Exactly one of *NetworkAttachmentList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *NetworkAttachmentList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.machineTypes.list" call. ++// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *MachineTypeList.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { ++func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -120566,7 +125424,7 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkAttachmentList{ ++ ret := &MachineTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -120578,13 +125436,13 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + } + return ret, nil + // { +- // "description": "Lists the NetworkAttachments for a project in the given scope.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments", ++ // "description": "Retrieves a list of machine types available to the specified project.", ++ // "flatPath": "projects/{project}/zones/{zone}/machineTypes", + // "httpMethod": "GET", +- // "id": "compute.networkAttachments.list", ++ // "id": "compute.machineTypes.list", + // "parameterOrder": [ + // "project", +- // "region" ++ // "zone" + // ], + // "parameters": { + // "filter": { +@@ -120617,22 +125475,22 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + // "required": true, + // "type": "string" + // }, +- // "region": { +- // "description": "Name of the region of this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments", ++ // "path": "projects/{project}/zones/{zone}/machineTypes", + // "response": { +- // "$ref": "NetworkAttachmentList" ++ // "$ref": "MachineTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -120646,7 +125504,7 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { ++func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -120664,346 +125522,9 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA + } + } + +-// method id "compute.networkAttachments.setIamPolicy": +- +-type NetworkAttachmentsSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. +-// +-// - project: Project ID for this request. 
+-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { +- c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.networkAttachments.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Policy{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.setIamPolicy", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", +- // "request": { +- // "$ref": "RegionSetPolicyRequest" +- // }, +- // "response": { +- // "$ref": "Policy" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.networkAttachments.testIamPermissions": +- +-type NetworkAttachmentsTestIamPermissionsCall struct { +- s *Service +- project string +- region string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. +-// +-// - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { +- c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. 
+-func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.networkAttachments.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &TestPermissionsResponse{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.networkAttachments.testIamPermissions", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, +- // "response": { +- // "$ref": "TestPermissionsResponse" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" +- // ] +- // } +- +-} +- +-// method id "compute.networkEdgeSecurityServices.aggregatedList": ++// method id "compute.networkAttachments.aggregatedList": + +-type NetworkEdgeSecurityServicesAggregatedListCall struct { ++type NetworkAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams +@@ -121012,12 +125533,12 @@ type NetworkEdgeSecurityServicesAggregatedListCall struct { + header_ http.Header + } + +-// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService +-// resources available to the specified project. ++// AggregatedList: Retrieves the list of all NetworkAttachment ++// resources, regional and global, available to the specified project. + // +-// - project: Name of the project scoping this request. +-func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { +- c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. 
++func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { ++ c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c + } +@@ -121057,7 +125578,7 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -121070,7 +125591,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *N + // response. For resource types which predate this field, if this flag + // is omitted or false, only scopes of the scope types where the + // resource type is expected to be found will be included. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c + } +@@ -121081,7 +125602,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(include + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -121095,7 +125616,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults in + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -121103,7 +125624,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -121112,7 +125633,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken stri + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. 
The default value is + // false. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -121120,7 +125641,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(ret + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121130,7 +125651,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Fi + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -121138,21 +125659,21 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag st + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { ++func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { ++func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121165,7 +125686,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -121178,16 +125699,14 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. +-// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error +-// will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { ++// Do executes the "compute.networkAttachments.aggregatedList" call. ++// Exactly one of *NetworkAttachmentAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if ++// a response was returned at all) in error.(*googleapi.Error).Header. ++// Use googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121206,7 +125725,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEdgeSecurityServiceAggregatedList{ ++ ret := &NetworkAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121218,10 +125737,10 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", +- // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", ++ // "flatPath": "projects/{project}/aggregated/networkAttachments", + // "httpMethod": "GET", +- // "id": "compute.networkEdgeSecurityServices.aggregatedList", ++ // "id": "compute.networkAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], +@@ -121255,7 +125774,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // "type": "string" + // }, + // "project": { +- // "description": "Name of the project scoping this request.", ++ // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, +@@ -121267,9 +125786,9 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "path": 
"projects/{project}/aggregated/networkAttachments", + // "response": { +- // "$ref": "NetworkEdgeSecurityServiceAggregatedList" ++ // "$ref": "NetworkAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -121283,7 +125802,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { ++func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -121301,29 +125820,29 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Contex + } + } + +-// method id "compute.networkEdgeSecurityServices.delete": ++// method id "compute.networkAttachments.delete": + +-type NetworkEdgeSecurityServicesDeleteCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified service. ++// Delete: Deletes the specified NetworkAttachment in the given scope + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to delete. ++// - networkAttachment: Name of the NetworkAttachment resource to ++// delete. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { +- c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { ++ c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkAttachment = networkAttachment + return c + } + +@@ -121337,8 +125856,9 @@ func (r *NetworkEdgeSecurityServicesService) Delete(project string, region strin + // received, and if so, will ignore the second request. This prevents + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { ++// supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -121346,7 +125866,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *Net + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { ++func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121354,21 +125874,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *Ne + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { ++func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { ++func (c *NetworkAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121378,7 +125898,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { +@@ -121386,21 +125906,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.delete" call. ++// Do executes the "compute.networkAttachments.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121431,18 +125951,18 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Deletes the specified service.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "description": "Deletes the specified NetworkAttachment in the given scope", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "DELETE", +- // "id": "compute.networkEdgeSecurityServices.delete", ++ // "id": "compute.networkAttachments.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "networkEdgeSecurityService" ++ // "networkAttachment" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to delete.", ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -121456,19 +125976,19 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "response": { + // "$ref": "Operation" + // }, +@@ -121480,37 +126000,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.networkEdgeSecurityServices.get": ++// method id "compute.networkAttachments.get": + +-type NetworkEdgeSecurityServicesGetCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsGetCall struct { ++ s *Service ++ project string ++ region string ++ networkAttachment string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Gets a specified NetworkEdgeSecurityService. ++// Get: Returns the specified NetworkAttachment resource in the given ++// scope. + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to get. ++// - networkAttachment: Name of the NetworkAttachment resource to ++// return. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { +- c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { ++ c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkAttachment = networkAttachment + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121520,7 +126041,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *Netwo + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -121528,21 +126049,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *Netw + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { ++func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { ++func (c *NetworkAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121555,7 +126076,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -121563,21 +126084,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, ++ "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.get" call. +-// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response +-// was returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.networkAttachments.get" call. ++// Exactly one of *NetworkAttachment or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkAttachment.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { ++func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121596,7 +126117,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEdgeSecurityService{ ++ ret := &NetworkAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121608,18 +126129,18 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Gets a specified NetworkEdgeSecurityService.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "description": "Returns the specified NetworkAttachment resource in the given scope.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "GET", +- // "id": "compute.networkEdgeSecurityServices.get", ++ // "id": "compute.networkAttachments.get", + // "parameterOrder": [ + // "project", + // "region", +- // "networkEdgeSecurityService" ++ // "networkAttachment" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to get.", ++ // "networkAttachment": { ++ // "description": "Name of the NetworkAttachment resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -121633,16 +126154,16 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "response": { +- // "$ref": "NetworkEdgeSecurityService" ++ // "$ref": "NetworkAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -121653,116 +126174,111 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkEdgeSecurityServices.insert": ++// method id "compute.networkAttachments.getIamPolicy": + +-type NetworkEdgeSecurityServicesInsertCall struct { +- s *Service +- project string +- region string +- networkedgesecurityservice *NetworkEdgeSecurityService +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsGetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a new service in the specified project using the data +-// included in the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. 
+-func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { +- c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { ++ c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkedgesecurityservice = networkedgesecurityservice +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { +- c.urlParams_.Set("requestId", requestId) ++ c.resource = resource + return c + } + +-// ValidateOnly sets the optional parameter "validateOnly": If true, the +-// request will not be committed. +-func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { +- c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { ++func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. 
Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { ++func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.insert" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. 
++func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121781,7 +126297,7 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -121793,15 +126309,22 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Creates a new service in the specified project using the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", +- // "httpMethod": "POST", +- // "id": "compute.networkEdgeSecurityServices.insert", ++ // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.networkAttachments.getIamPolicy", + // "parameterOrder": [ + // "project", +- // "region" ++ // "region", ++ // "resource" + // ], + // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -121810,70 +126333,55 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" +- // }, +- // "validateOnly": { +- // "description": "If true, the request will not be committed.", +- // "location": "query", +- // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", +- // "request": { +- // "$ref": "NetworkEdgeSecurityService" +- // }, ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEdgeSecurityServices.patch": ++// method id "compute.networkAttachments.insert": + +-type NetworkEdgeSecurityServicesPatchCall struct { +- s *Service +- project string +- region string +- networkEdgeSecurityService string +- networkedgesecurityservice *NetworkEdgeSecurityService +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsInsertCall struct { ++ s *Service ++ project string ++ region string ++ networkattachment *NetworkAttachment ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Patches the specified policy with the data included in the +-// request. ++// Insert: Creates a NetworkAttachment in the specified project in the ++// given scope using the parameters that are included in the request. + // +-// - networkEdgeSecurityService: Name of the network edge security +-// service to update. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { +- c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { ++ c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.networkEdgeSecurityService = networkEdgeSecurityService +- c.networkedgesecurityservice = networkedgesecurityservice +- return c +-} +- +-// Paths sets the optional parameter "paths": +-func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { +- c.urlParams_.SetMulti("paths", append([]string{}, paths...)) ++ c.networkattachment = networkattachment + return c + } + +@@ -121887,23 +126395,17 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEd + // received, and if so, will ignore the second request. This prevents + // clients from accidentally creating duplicate commitments. 
The request + // ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { ++// supported ( 00000000-0000-0000-0000-000000000000). end_interface: ++// MixerMutationRequestBuilder ++func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + +-// UpdateMask sets the optional parameter "updateMask": Indicates fields +-// to be updated as part of this request. +-func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { +- c.urlParams_.Set("updateMask", updateMask) +- return c +-} +- + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { ++func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -121911,21 +126413,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *Net + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { ++func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { ++func (c *NetworkAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -121933,36 +126435,35 @@ func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEdgeSecurityServices.patch" call. ++// Do executes the "compute.networkAttachments.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -121993,28 +126494,15 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Patches the specified policy with the data included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", +- // "httpMethod": "PATCH", +- // "id": "compute.networkEdgeSecurityServices.patch", ++ // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.insert", + // "parameterOrder": [ + // "project", +- // "region", +- // "networkEdgeSecurityService" ++ // "region" + // ], + // "parameters": { +- // "networkEdgeSecurityService": { +- // "description": "Name of the network edge security service to update.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "paths": { +- // "location": "query", +- // "repeated": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122023,27 +126511,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", +- // "type": "string" +- // }, +- // "updateMask": { +- // "description": "Indicates fields to be updated as part of this request.", +- // "format": "google-fieldmask", ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments", + // "request": { +- // "$ref": "NetworkEdgeSecurityService" ++ // "$ref": "NetworkAttachment" + // }, + // "response": { + // "$ref": "Operation" +@@ -122056,24 +126538,26 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.networkEndpointGroups.aggregatedList": ++// method id "compute.networkAttachments.list": + +-type NetworkEndpointGroupsAggregatedListCall struct { ++type NetworkAttachmentsListCall struct { + s *Service + project string ++ region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header + } + +-// AggregatedList: Retrieves the list of network endpoint groups and +-// sorts them by zone. ++// List: Lists the NetworkAttachments for a project in the given scope. + // + // - project: Project ID for this request. +-func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { +- c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region of this request. ++func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { ++ c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project ++ c.region = region + return c + } + +@@ -122112,31 +126596,18 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. 
+-func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c + } + +-// IncludeAllScopes sets the optional parameter "includeAllScopes": +-// Indicates whether every visible scope for each scope type (zone, +-// region, global) should be included in the response. For new resource +-// types added after this field, the flag has no effect as new resource +-// types will always include every visible scope for each scope type in +-// response. For resource types which predate this field, if this flag +-// is omitted or false, only scopes of the scope types where the +-// resource type is expected to be found will be included. +-func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { +- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) +- return c +-} +- + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -122150,7 +126621,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) * + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -122158,7 +126629,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *Netwo + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -122167,7 +126638,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. 
+-func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -122175,7 +126646,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPar + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122185,7 +126656,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) * + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -122193,21 +126664,21 @@ func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { ++func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { ++func (c *NetworkAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122220,7 +126691,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -122229,19 +126700,19 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +-// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { ++// Do executes the "compute.networkAttachments.list" call. ++// Exactly one of *NetworkAttachmentList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkAttachmentList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122260,7 +126731,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupAggregatedList{ ++ ret := &NetworkAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122272,12 +126743,13 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + } + return ret, nil + // { +- // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", +- // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", ++ // "description": "Lists the NetworkAttachments for a project in the given scope.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.aggregatedList", ++ // "id": "compute.networkAttachments.list", + // "parameterOrder": [ +- // "project" ++ // "project", ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -122285,11 +126757,6 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // "location": "query", + // "type": "string" + // }, +- // "includeAllScopes": { +- // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -122315,15 +126782,22 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region of this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/aggregated/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkAttachments", + // "response": { +- // "$ref": "NetworkEndpointGroupAggregatedList" ++ // "$ref": "NetworkAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -122337,7 +126811,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { ++func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -122355,57 +126829,38 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f f + } + } + +-// method id "compute.networkEndpointGroups.attachNetworkEndpoints": ++// method id "compute.networkAttachments.setIamPolicy": + +-type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AttachNetworkEndpoints: Attach a list of network endpoints to the +-// specified network endpoint group. ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // +-// - networkEndpointGroup: The name of the network endpoint group where +-// you are attaching network endpoints to. It should comply with +-// RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. 
It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +- c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { ++ c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +- c.urlParams_.Set("requestId", requestId) ++ c.region = region ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122413,21 +126868,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122435,14 +126890,14 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -122450,21 +126905,21 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122483,7 +126938,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122495,22 +126950,16 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Attach a list of network endpoints to the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", ++ // "id": "compute.networkAttachments.setIamPolicy", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "resource" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122518,24 +126967,27 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "request": { +- // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" ++ // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -122545,56 +126997,38 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C + + } + +-// method id "compute.networkEndpointGroups.delete": ++// method id "compute.networkAttachments.testIamPermissions": + +-type NetworkEndpointGroupsDeleteCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkAttachmentsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified network endpoint group. The network +-// endpoints in the NEG and the VM instances they belong to are not +-// terminated when the NEG is deleted. Note that the NEG cannot be +-// deleted if there are backend services referencing it. ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. + // +-// - networkEndpointGroup: The name of the network endpoint group to +-// delete. It should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { +- c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { ++ c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. 
The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { +- c.urlParams_.Set("requestId", requestId) ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -122602,21 +127036,21 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { ++func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -122624,31 +127058,36 @@ func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.delete" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. 
Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkAttachments.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122667,7 +127106,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122679,22 +127118,16 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + } + return ret, nil + // { +- // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", +- // "httpMethod": "DELETE", +- // "id": "compute.networkEndpointGroups.delete", ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "resource" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -122702,139 +127135,224 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.detachNetworkEndpoints": ++// method id "compute.networkEdgeSecurityServices.aggregatedList": + +-type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// DetachNetworkEndpoints: Detach a list of network endpoints from the +-// specified network endpoint group. ++// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService ++// resources available to the specified project. + // +-// - networkEndpointGroup: The name of the network endpoint group where +-// you are removing network endpoints. It should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +- c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Name of the project scoping this request. ++func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c + } + +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +- c.urlParams_.Set("requestId", requestId) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. 
++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. 
Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. ++// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error ++// will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -122853,7 +127371,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &NetworkEdgeSecurityServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -122865,155 +127383,189 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C + } + return ret, nil + // { +- // "description": "Detach a list of network endpoints from the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", +- // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", ++ // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", ++ // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "project" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", + // "type": "string" + // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "project": { ++ // "description": "Name of the project scoping this request.", + // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", +- // "request": { +- // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" +- // }, ++ // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.get": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type NetworkEndpointGroupsGetCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++// method id "compute.networkEdgeSecurityServices.delete": ++ ++type NetworkEdgeSecurityServicesDeleteCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified network endpoint group. ++// Delete: Deletes the specified service. + // +-// - networkEndpointGroup: The name of the network endpoint group. It +-// should comply with RFC1035. ++// - networkEdgeSecurityService: Name of the network edge security ++// service to delete. + // - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { +- c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { ++ c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsGetCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.get" call. +-// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. 
+-func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { ++// Do executes the "compute.networkEdgeSecurityServices.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123032,7 +127584,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroup{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123044,19 +127596,20 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + } + return ret, nil + // { +- // "description": "Returns the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", +- // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.get", ++ // "description": "Deletes the specified service.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.networkEdgeSecurityServices.delete", + // "parameterOrder": [ + // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "region", ++ // "networkEdgeSecurityService" + // ], + // "parameters": { +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to delete.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -123067,49 +127620,226 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ + // "required": true, + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { +- // "$ref": "NetworkEndpointGroup" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.networkEndpointGroups.insert": ++// method id "compute.networkEdgeSecurityServices.get": + +-type NetworkEndpointGroupsInsertCall struct { +- s *Service +- project string +- zone string +- networkendpointgroup *NetworkEndpointGroup +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesGetCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates a network endpoint group in the specified project +-// using the parameters that are included in the request. ++// Get: Gets a specified NetworkEdgeSecurityService. + // ++// - networkEdgeSecurityService: Name of the network edge security ++// service to get. + // - project: Project ID for this request. +-// - zone: The name of the zone where you want to create the network +-// endpoint group. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { +- c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.zone = zone +- c.networkendpointgroup = networkendpointgroup ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { ++ c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEdgeSecurityServices.get" call. ++// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEdgeSecurityService{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets a specified NetworkEdgeSecurityService.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEdgeSecurityServices.get", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "networkEdgeSecurityService" ++ // ], ++ // "parameters": { ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to get.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "response": { ++ // "$ref": "NetworkEdgeSecurityService" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEdgeSecurityServices.insert": ++ ++type NetworkEdgeSecurityServicesInsertCall struct { ++ s *Service ++ project string ++ region string ++ networkedgesecurityservice *NetworkEdgeSecurityService ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a new service in the specified project using the data ++// included in the request. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { ++ c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.networkedgesecurityservice = networkedgesecurityservice + return c + } + +@@ -123124,15 +127854,22 @@ func (r *NetworkEndpointGroupsService) Insert(project string, zone string, netwo + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). 
+-func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } + ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123140,21 +127877,21 @@ func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkE + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { ++func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123162,14 +127899,14 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123178,19 +127915,19 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, +- "zone": c.zone, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.insert" call. 
++// Do executes the "compute.networkEdgeSecurityServices.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123221,13 +127958,13 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + } + return ret, nil + // { +- // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "description": "Creates a new service in the specified project using the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.insert", ++ // "id": "compute.networkEdgeSecurityServices.insert", + // "parameterOrder": [ + // "project", +- // "zone" ++ // "region" + // ], + // "parameters": { + // "project": { +@@ -123237,21 +127974,27 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + // "required": true, + // "type": "string" + // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, +- // "zone": { +- // "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "request": { +- // "$ref": "NetworkEndpointGroup" ++ // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" +@@ -123264,183 +128007,127 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope + + } + +-// method id "compute.networkEndpointGroups.list": ++// method id "compute.networkEdgeSecurityServices.patch": + +-type NetworkEndpointGroupsListCall struct { +- s *Service +- project string +- zone string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type NetworkEdgeSecurityServicesPatchCall struct { ++ s *Service ++ project string ++ region string ++ networkEdgeSecurityService string ++ networkedgesecurityservice *NetworkEdgeSecurityService ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// List: Retrieves the list of network endpoint groups that are located +-// in the specified project and zone. ++// Patch: Patches the specified policy with the data included in the ++// request. + // ++// - networkEdgeSecurityService: Name of the network edge security ++// service to update. + // - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { +- c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request. ++func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { ++ c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- return c +-} +- +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ c.region = region ++ c.networkEdgeSecurityService = networkEdgeSecurityService ++ c.networkedgesecurityservice = networkedgesecurityservice + return c + } + +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("orderBy", orderBy) ++// Paths sets the optional parameter "paths": ++func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c + } + +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. 
+-func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("pageToken", pageToken) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// UpdateMask sets the optional parameter "updateMask": Indicates fields ++// to be updated as part of this request. ++func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { ++ c.urlParams_.Set("updateMask", updateMask) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { ++func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { ++func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsListCall) Header() http.Header { ++func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, ++ "project": c.project, ++ "region": c.region, ++ "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.list" call. +-// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +-// Any non-2xx status code is an error. Response headers are in either +-// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { ++// Do executes the "compute.networkEdgeSecurityServices.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123459,7 +128146,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123471,36 +128158,26 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + } + return ret, nil + // { +- // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", +- // "httpMethod": "GET", +- // "id": "compute.networkEndpointGroups.list", ++ // "description": "Patches the specified policy with the data included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.networkEdgeSecurityServices.patch", + // "parameterOrder": [ + // "project", +- // "zone" ++ // "region", ++ // "networkEdgeSecurityService" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", ++ // "networkEdgeSecurityService": { ++ // "description": "Name of the network edge security service to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, + // "type": "string" + // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "paths": { + // "location": "query", ++ // "repeated": true, + // "type": "string" + // }, + // "project": { +@@ -123510,80 +128187,58 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", +- // "location": "query", +- // "type": "boolean" +- // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "updateMask": { ++ // "description": "Indicates fields to be updated as part of this request.", ++ // "format": "google-fieldmask", ++ // "location": "query", ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", ++ // "request": { ++ // "$ref": "NetworkEdgeSecurityService" ++ // }, + // "response": { +- // "$ref": "NetworkEndpointGroupList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.networkEndpointGroups.listNetworkEndpoints": ++// method id "compute.networkEndpointGroups.aggregatedList": + +-type NetworkEndpointGroupsListNetworkEndpointsCall struct { +- s *Service +- project string +- zone string +- networkEndpointGroup string +- networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsAggregatedListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// ListNetworkEndpoints: Lists the network endpoints in the specified +-// network endpoint group. ++// AggregatedList: Retrieves the list of network endpoint groups and ++// sorts them by zone. + // +-// - networkEndpointGroup: The name of the network endpoint group from +-// which you want to generate a list of included network endpoints. It +-// should comply with RFC1035. +-// - project: Project ID for this request. +-// - zone: The name of the zone where the network endpoint group is +-// located. It should comply with RFC1035. +-func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { +- c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. 
++func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { ++ c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.zone = zone +- c.networkEndpointGroup = networkEndpointGroup +- c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest + return c + } + +@@ -123622,18 +128277,31 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c + } + ++// IncludeAllScopes sets the optional parameter "includeAllScopes": ++// Indicates whether every visible scope for each scope type (zone, ++// region, global) should be included in the response. For new resource ++// types added after this field, the flag has no effect as new resource ++// types will always include every visible scope for each scope type in ++// response. For resource types which predate this field, if this flag ++// is omitted or false, only scopes of the scope types where the ++// resource type is expected to be found will be included. ++func (c *NetworkEndpointGroupsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEndpointGroupsAggregatedListCall { ++ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) ++ return c ++} ++ + // MaxResults sets the optional parameter "maxResults": The maximum + // number of results per page that should be returned. If the number of + // available results is larger than `maxResults`, Compute Engine returns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -123647,7 +128315,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults in + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -123655,7 +128323,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. 
+-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -123664,7 +128332,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -123672,68 +128340,73 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(ret + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { ++func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { ++func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEndpointGroups") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "networkEndpointGroup": c.networkEndpointGroup, ++ "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +-// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +-// will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or ++// Do executes the "compute.networkEndpointGroups.aggregatedList" call. ++// Exactly one of *NetworkEndpointGroupAggregatedList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or + // (if a response was returned at all) in + // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check + // whether the returned error was because http.StatusNotModified was + // returned. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { ++func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123752,7 +128425,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &NetworkEndpointGroupsListNetworkEndpoints{ ++ ret := &NetworkEndpointGroupAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123764,14 +128437,12 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Lists the network endpoints in the specified network endpoint group.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", +- // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.listNetworkEndpoints", ++ // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", ++ // "flatPath": "projects/{project}/aggregated/networkEndpointGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.aggregatedList", + // "parameterOrder": [ +- // "project", +- // "zone", +- // "networkEndpointGroup" ++ // "project" + // ], + // "parameters": { + // "filter": { +@@ -123779,6 +128450,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "location": "query", + // "type": "string" + // }, ++ // "includeAllScopes": { ++ // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -123787,12 +128463,6 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "minimum": "0", + // "type": "integer" + // }, +- // "networkEndpointGroup": { +- // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", +@@ -123814,20 +128484,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" +- // }, +- // "zone": { +- // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", +- // "location": "path", +- // "required": true, +- // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", +- // "request": { +- // "$ref": "NetworkEndpointGroupsListEndpointsRequest" +- // }, ++ // "path": "projects/{project}/aggregated/networkEndpointGroups", + // "response": { +- // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" ++ // "$ref": "NetworkEndpointGroupAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -123841,7 +128502,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { ++func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -123859,38 +128520,57 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Contex + } + } + +-// method id "compute.networkEndpointGroups.testIamPermissions": ++// method id "compute.networkEndpointGroups.attachNetworkEndpoints": + +-type NetworkEndpointGroupsTestIamPermissionsCall struct { +- s *Service +- project string +- zone string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// AttachNetworkEndpoints: Attach a list of network endpoints to the ++// specified network endpoint group. + // +-// - project: Project ID for this request. +-// - resource: Name or id of the resource for this request. +-// - zone: The name of the zone for this request. +-func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { +- c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group where ++// you are attaching network endpoints to. It should comply with ++// RFC1035. ++// - project: Project ID for this request. 
++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -123898,21 +128578,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Fiel + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -123920,14 +128600,14 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -123935,21 +128615,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "resource": c.resource, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -123968,7 +128648,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -123980,16 +128660,22 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "description": "Attach a list of network endpoints to the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "httpMethod": "POST", +- // "id": "compute.networkEndpointGroups.testIamPermissions", ++ // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "zone", +- // "resource" ++ // "networkEndpointGroup" + // ], + // "parameters": { ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -123997,69 +128683,60 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // }, + // "zone": { +- // "description": "The name of the zone for this request.", ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { +- // "$ref": "TestPermissionsRequest" ++ // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.networkFirewallPolicies.addAssociation": ++// method id "compute.networkEndpointGroups.delete": + +-type NetworkFirewallPoliciesAddAssociationCall struct { +- s *Service +- project string +- firewallPolicy string +- firewallpolicyassociation *FirewallPolicyAssociation +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsDeleteCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AddAssociation: Inserts an association for the specified firewall +-// policy. ++// Delete: Deletes the specified network endpoint group. The network ++// endpoints in the NEG and the VM instances they belong to are not ++// terminated when the NEG is deleted. Note that the NEG cannot be ++// deleted if there are backend services referencing it. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) AddAssociation(project string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *NetworkFirewallPoliciesAddAssociationCall { +- c := &NetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group to ++// delete. It should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { ++ c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.firewallPolicy = firewallPolicy +- c.firewallpolicyassociation = firewallpolicyassociation +- return c +-} +- +-// ReplaceExistingAssociation sets the optional parameter +-// "replaceExistingAssociation": Indicates whether or not to replace it +-// if an association of the attachment already exists. This is false by +-// default, in which case an error will be returned if an association +-// already exists. 
+-func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *NetworkFirewallPoliciesAddAssociationCall { +- c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup + return c + } + +@@ -124074,7 +128751,7 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(r + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124082,7 +128759,7 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124090,21 +128767,21 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddAssociationCall { ++func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Header() http.Header { ++func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124112,35 +128789,31 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "firewallPolicy": c.firewallPolicy, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkFirewallPolicies.addAssociation" call. ++// Do executes the "compute.networkEndpointGroups.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124171,19 +128844,19 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { +- // "description": "Inserts an association for the specified firewall policy.", +- // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", +- // "httpMethod": "POST", +- // "id": "compute.networkFirewallPolicies.addAssociation", ++ // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.networkEndpointGroups.delete", + // "parameterOrder": [ + // "project", +- // "firewallPolicy" ++ // "zone", ++ // "networkEndpointGroup" + // ], + // "parameters": { +- // "firewallPolicy": { +- // "description": "Name of the firewall policy to update.", ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -124194,21 +128867,19 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + // "required": true, + // "type": "string" + // }, +- // "replaceExistingAssociation": { +- // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", +- // "location": "query", +- // "type": "boolean" +- // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", +- // "request": { +- // "$ref": "FirewallPolicyAssociation" +- // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "response": { + // "$ref": "Operation" + // }, +@@ -124220,45 +128891,33 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt + + } + +-// method id "compute.networkFirewallPolicies.addRule": ++// method id "compute.networkEndpointGroups.detachNetworkEndpoints": + +-type NetworkFirewallPoliciesAddRuleCall struct { +- s *Service +- project string +- firewallPolicy string +- firewallpolicyrule *FirewallPolicyRule +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// AddRule: Inserts a rule into a firewall policy. ++// DetachNetworkEndpoints: Detach a list of network endpoints from the ++// specified network endpoint group. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) AddRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesAddRuleCall { +- c := &NetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - networkEndpointGroup: The name of the network endpoint group where ++// you are removing network endpoints. It should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project +- c.firewallPolicy = firewallPolicy +- c.firewallpolicyrule = firewallpolicyrule +- return c +-} +- +-// MaxPriority sets the optional parameter "maxPriority": When +-// rule.priority is not specified, auto choose a unused priority between +-// minPriority and maxPriority>. This field is exclusive with +-// rule.priority. 
+-func (c *NetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *NetworkFirewallPoliciesAddRuleCall { +- c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) +- return c +-} +- +-// MinPriority sets the optional parameter "minPriority": When +-// rule.priority is not specified, auto choose a unused priority between +-// minPriority and maxPriority>. This field is exclusive with +-// rule.priority. +-func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *NetworkFirewallPoliciesAddRuleCall { +- c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c + } + +@@ -124273,7 +128932,7 @@ func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *Net + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -124281,7 +128940,7 @@ func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *Networ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -124289,21 +128948,21 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *Netwo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *NetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddRuleCall { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *NetworkFirewallPoliciesAddRuleCall) Header() http.Header { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -124311,14 +128970,14 @@ func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Respon + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -124326,20 +128985,21 @@ func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Respon + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "firewallPolicy": c.firewallPolicy, ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.networkFirewallPolicies.addRule" call. ++// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -124370,34 +129030,22 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { +- // "description": "Inserts a rule into a firewall policy.", +- // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "description": "Detach a list of network endpoints from the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "httpMethod": "POST", +- // "id": "compute.networkFirewallPolicies.addRule", ++ // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "parameterOrder": [ + // "project", +- // "firewallPolicy" ++ // "zone", ++ // "networkEndpointGroup" + // ], + // "parameters": { +- // "firewallPolicy": { +- // "description": "Name of the firewall policy to update.", ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +- // "maxPriority": { +- // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, +- // "minPriority": { +- // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", +- // "format": "int32", +- // "location": "query", +- // "type": "integer" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -124409,11 +129057,17 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { +- // "$ref": "FirewallPolicyRule" ++ // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -124426,25 +129080,1536 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* + + } + +-// method id "compute.networkFirewallPolicies.cloneRules": ++// method id "compute.networkEndpointGroups.get": + +-type NetworkFirewallPoliciesCloneRulesCall struct { +- s *Service +- project string +- firewallPolicy string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type NetworkEndpointGroupsGetCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// CloneRules: Copies rules to the specified firewall policy. ++// Get: Returns the specified network endpoint group. + // +-// - firewallPolicy: Name of the firewall policy to update. +-// - project: Project ID for this request. +-func (r *NetworkFirewallPoliciesService) CloneRules(project string, firewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { +- c := &NetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.firewallPolicy = firewallPolicy ++// - networkEndpointGroup: The name of the network endpoint group. It ++// should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { ++ c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkEndpointGroupsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.get" call. ++// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *NetworkEndpointGroup.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroup{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.get", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "networkEndpointGroup" ++ // ], ++ // "parameters": { ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", ++ // "response": { ++ // "$ref": "NetworkEndpointGroup" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEndpointGroups.insert": ++ ++type NetworkEndpointGroupsInsertCall struct { ++ s *Service ++ project string ++ zone string ++ networkendpointgroup *NetworkEndpointGroup ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates a network endpoint group in the specified project ++// using the parameters that are included in the request. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone where you want to create the network ++// endpoint group. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { ++ c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkendpointgroup = networkendpointgroup ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.insert", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "request": { ++ // "$ref": "NetworkEndpointGroup" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkEndpointGroups.list": ++ ++type NetworkEndpointGroupsListCall struct { ++ s *Service ++ project string ++ zone string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of network endpoint groups that are located ++// in the specified project and zone. ++// ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { ++ c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.list" call. ++// Exactly one of *NetworkEndpointGroupList or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroupList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.networkEndpointGroups.list", ++ // "parameterOrder": [ ++ // "project", ++ // "zone" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups", ++ // "response": { ++ // "$ref": "NetworkEndpointGroupList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.networkEndpointGroups.listNetworkEndpoints": ++ ++type NetworkEndpointGroupsListNetworkEndpointsCall struct { ++ s *Service ++ project string ++ zone string ++ networkEndpointGroup string ++ networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// ListNetworkEndpoints: Lists the network endpoints in the specified ++// network endpoint group. ++// ++// - networkEndpointGroup: The name of the network endpoint group from ++// which you want to generate a list of included network endpoints. It ++// should comply with RFC1035. ++// - project: Project ID for this request. ++// - zone: The name of the zone where the network endpoint group is ++// located. It should comply with RFC1035. ++func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.networkEndpointGroup = networkEndpointGroup ++ c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "networkEndpointGroup": c.networkEndpointGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. ++// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error ++// will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &NetworkEndpointGroupsListNetworkEndpoints{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Lists the network endpoints in the specified network endpoint group.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.listNetworkEndpoints", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "networkEndpointGroup" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "networkEndpointGroup": { ++ // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", ++ // "request": { ++ // "$ref": "NetworkEndpointGroupsListEndpointsRequest" ++ // }, ++ // "response": { ++ // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.networkEndpointGroups.testIamPermissions": ++ ++type NetworkEndpointGroupsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ zone string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - resource: Name or id of the resource for this request. ++// - zone: The name of the zone for this request. ++func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.networkEndpointGroups.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.addAssociation": ++ ++type NetworkFirewallPoliciesAddAssociationCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ firewallpolicyassociation *FirewallPolicyAssociation ++ 
urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddAssociation: Inserts an association for the specified firewall ++// policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++func (r *NetworkFirewallPoliciesService) AddAssociation(project string, firewallPolicy string, firewallpolicyassociation *FirewallPolicyAssociation) *NetworkFirewallPoliciesAddAssociationCall { ++ c := &NetworkFirewallPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyassociation = firewallpolicyassociation ++ return c ++} ++ ++// ReplaceExistingAssociation sets the optional parameter ++// "replaceExistingAssociation": Indicates whether or not to replace it ++// if an association of the attachment already exists. This is false by ++// default, in which case an error will be returned if an association ++// already exists. ++func (c *NetworkFirewallPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkFirewallPoliciesAddAssociationCall) RequestId(requestId string) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddAssociationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddAssociationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkFirewallPoliciesAddAssociationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkFirewallPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyassociation) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkFirewallPolicies.addAssociation" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts an association for the specified firewall policy.", ++ // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", ++ // "httpMethod": "POST", ++ // "id": "compute.networkFirewallPolicies.addAssociation", ++ // "parameterOrder": [ ++ // "project", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "replaceExistingAssociation": { ++ // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an association already exists.", ++ // "location": "query", ++ // "type": "boolean" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addAssociation", ++ // "request": { ++ // "$ref": "FirewallPolicyAssociation" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.addRule": ++ ++type NetworkFirewallPoliciesAddRuleCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ firewallpolicyrule *FirewallPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddRule: Inserts a rule into a firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. 
++func (r *NetworkFirewallPoliciesService) AddRule(project string, firewallPolicy string, firewallpolicyrule *FirewallPolicyRule) *NetworkFirewallPoliciesAddRuleCall { ++ c := &NetworkFirewallPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy ++ c.firewallpolicyrule = firewallpolicyrule ++ return c ++} ++ ++// MaxPriority sets the optional parameter "maxPriority": When ++// rule.priority is not specified, auto choose a unused priority between ++// minPriority and maxPriority>. This field is exclusive with ++// rule.priority. ++func (c *NetworkFirewallPoliciesAddRuleCall) MaxPriority(maxPriority int64) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("maxPriority", fmt.Sprint(maxPriority)) ++ return c ++} ++ ++// MinPriority sets the optional parameter "minPriority": When ++// rule.priority is not specified, auto choose a unused priority between ++// minPriority and maxPriority>. This field is exclusive with ++// rule.priority. ++func (c *NetworkFirewallPoliciesAddRuleCall) MinPriority(minPriority int64) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("minPriority", fmt.Sprint(minPriority)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NetworkFirewallPoliciesAddRuleCall) RequestId(requestId string) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NetworkFirewallPoliciesAddRuleCall) Fields(s ...googleapi.Field) *NetworkFirewallPoliciesAddRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NetworkFirewallPoliciesAddRuleCall) Context(ctx context.Context) *NetworkFirewallPoliciesAddRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *NetworkFirewallPoliciesAddRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NetworkFirewallPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewallpolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "firewallPolicy": c.firewallPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.networkFirewallPolicies.addRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts a rule into a firewall policy.", ++ // "flatPath": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "httpMethod": "POST", ++ // "id": "compute.networkFirewallPolicies.addRule", ++ // "parameterOrder": [ ++ // "project", ++ // "firewallPolicy" ++ // ], ++ // "parameters": { ++ // "firewallPolicy": { ++ // "description": "Name of the firewall policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "maxPriority": { ++ // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. This field is exclusive with rule.priority.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "minPriority": { ++ // "description": "When rule.priority is not specified, auto choose a unused priority between minPriority and maxPriority\u003e. 
This field is exclusive with rule.priority.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/firewallPolicies/{firewallPolicy}/addRule", ++ // "request": { ++ // "$ref": "FirewallPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.networkFirewallPolicies.cloneRules": ++ ++type NetworkFirewallPoliciesCloneRulesCall struct { ++ s *Service ++ project string ++ firewallPolicy string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// CloneRules: Copies rules to the specified firewall policy. ++// ++// - firewallPolicy: Name of the firewall policy to update. ++// - project: Project ID for this request. ++func (r *NetworkFirewallPoliciesService) CloneRules(project string, firewallPolicy string) *NetworkFirewallPoliciesCloneRulesCall { ++ c := &NetworkFirewallPoliciesCloneRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.firewallPolicy = firewallPolicy + return c + } + +@@ -145881,9 +152046,367 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", ++ // "path": "projects/{project}/regions/{region}/backendServices/{backendService}", ++ // "request": { ++ // "$ref": "BackendService" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionBackendServices.setIamPolicy": ++ ++type RegionBackendServicesSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. 
++func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { ++ c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionBackendServices.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionBackendServices.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionBackendServices.setSecurityPolicy": ++ ++type RegionBackendServicesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ backendService string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified backend service. For more information, see Google Cloud ++// Armor Overview ++// ++// - backendService: Name of the BackendService resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. 
++func (r *RegionBackendServicesService) SetSecurityPolicy(project string, region string, backendService string, securitypolicyreference *SecurityPolicyReference) *RegionBackendServicesSetSecurityPolicyCall { ++ c := &RegionBackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.backendService = backendService ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionBackendServicesSetSecurityPolicyCall) RequestId(requestId string) *RegionBackendServicesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionBackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "backendService": c.backendService, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionBackendServices.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionBackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionBackendServices.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "backendService" ++ // ], ++ // "parameters": { ++ // "backendService": { ++ // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + // "request": { +- // "$ref": "BackendService" ++ // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" +@@ -145896,174 +152419,6 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper + + } + +-// method id "compute.regionBackendServices.setIamPolicy": +- +-type RegionBackendServicesSetIamPolicyCall struct { +- s *Service +- project string +- region string +- resource string +- regionsetpolicyrequest *RegionSetPolicyRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// SetIamPolicy: Sets the access control policy on the specified +-// resource. Replaces any existing policy. +-// +-// - project: Project ID for this request. +-// - region: The name of the region for this request. +-// - resource: Name or id of the resource for this request. +-func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { +- c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.resource = resource +- c.regionsetpolicyrequest = regionsetpolicyrequest +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") +- urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.regionBackendServices.setIamPolicy" call. +-// Exactly one of *Policy or error will be non-nil. Any non-2xx status +-// code is an error. Response headers are in either +-// *Policy.ServerResponse.Header or (if a response was returned at all) +-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +-// check whether the returned error was because http.StatusNotModified +-// was returned. +-func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Policy{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", +- // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", +- // "httpMethod": "POST", +- // "id": "compute.regionBackendServices.setIamPolicy", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "resource" +- // ], +- // "parameters": { +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "The name of the region for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", +- // "request": { +- // "$ref": "RegionSetPolicyRequest" +- // }, +- // "response": { +- // "$ref": "Policy" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- + // method id "compute.regionBackendServices.testIamPermissions": + + type RegionBackendServicesTestIamPermissionsCall struct { +@@ -148591,6 +154946,182 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* + + } + ++// method id "compute.regionDisks.bulkInsert": ++ ++type RegionDisksBulkInsertCall struct { ++ s *Service ++ project string ++ region string ++ 
bulkinsertdiskresource *BulkInsertDiskResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// BulkInsert: Bulk create a set of disks. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) BulkInsert(project string, region string, bulkinsertdiskresource *BulkInsertDiskResource) *RegionDisksBulkInsertCall { ++ c := &RegionDisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.bulkinsertdiskresource = bulkinsertdiskresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksBulkInsertCall) RequestId(requestId string) *RegionDisksBulkInsertCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksBulkInsertCall) Fields(s ...googleapi.Field) *RegionDisksBulkInsertCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksBulkInsertCall) Context(ctx context.Context) *RegionDisksBulkInsertCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksBulkInsertCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/bulkInsert") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.bulkInsert" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Bulk create a set of disks.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.bulkInsert", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/bulkInsert", ++ // "request": { ++ // "$ref": "BulkInsertDiskResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionDisks.createSnapshot": + + type RegionDisksCreateSnapshotCall struct { +@@ -150534,6 +157065,553 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.regionDisks.startAsyncReplication": ++ ++type RegionDisksStartAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disk string ++ regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StartAsyncReplication: Starts asynchronous replication. Must be ++// invoked on the primary disk. ++// ++// - disk: The name of the persistent disk. ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) StartAsyncReplication(project string, region string, disk string, regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest) *RegionDisksStartAsyncReplicationCall { ++ c := &RegionDisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disk = disk ++ c.regiondisksstartasyncreplicationrequest = regiondisksstartasyncreplicationrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStartAsyncReplicationCall) RequestId(requestId string) *RegionDisksStartAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStartAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *RegionDisksStartAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStartAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStartAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksstartasyncreplicationrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.startAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.startAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", ++ // "request": { ++ // "$ref": "RegionDisksStartAsyncReplicationRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionDisks.stopAsyncReplication": ++ ++type RegionDisksStopAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disk string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopAsyncReplication: Stops asynchronous replication. Can be invoked ++// either on the primary or on the secondary disk. ++// ++// - disk: The name of the persistent disk. ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionDisksService) StopAsyncReplication(project string, region string, disk string) *RegionDisksStopAsyncReplicationCall { ++ c := &RegionDisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disk = disk ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. 
If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStopAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksStopAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStopAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "disk": c.disk, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.stopAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.stopAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "disk" ++ // ], ++ // "parameters": { ++ // "disk": { ++ // "description": "The name of the persistent disk.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionDisks.stopGroupAsyncReplication": ++ ++type RegionDisksStopGroupAsyncReplicationCall struct { ++ s *Service ++ project string ++ region string ++ disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// StopGroupAsyncReplication: Stops asynchronous replication for a ++// consistency group of disks. Can be invoked either in the primary or ++// secondary scope. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. 
This must be the ++// region of the primary or secondary disks in the consistency group. ++func (r *RegionDisksService) StopGroupAsyncReplication(project string, region string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *RegionDisksStopGroupAsyncReplicationCall { ++ c := &RegionDisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionDisksStopGroupAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopGroupAsyncReplicationCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopGroupAsyncReplicationCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionDisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionDisks.stopGroupAsyncReplication" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionDisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", ++ // "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ // "httpMethod": "POST", ++ // "id": "compute.regionDisks.stopGroupAsyncReplication", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request. This must be the region of the primary or secondary disks in the consistency group.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", ++ // "request": { ++ // "$ref": "DisksStopGroupAsyncReplicationResource" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionDisks.testIamPermissions": + + type RegionDisksTestIamPermissionsCall struct { +@@ -155928,7 +163006,1025 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. ++// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or ++// error will be non-nil. Any non-2xx status code is an error. Response ++// headers are in either ++// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea ++// der or (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. 
++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "instanceGroupManager": { ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "response": { ++ // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. 
++func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ ++// method id "compute.regionInstanceGroupManagers.patch": ++ ++type RegionInstanceGroupManagersPatchCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ instancegroupmanager *InstanceGroupManager ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Patch: Updates a managed instance group using the information that ++// you specify in the request. This operation is marked as DONE when the ++// group is patched even if the instances in the group are still in the ++// process of being patched. You must separately verify the status of ++// the individual instances with the listmanagedinstances method. This ++// method supports PATCH semantics and uses the JSON merge patch format ++// and processing rules. If you update your group to specify a new ++// template or instance configuration, it's possible that your intended ++// specification for each VM in the group is different from the current ++// state of that VM. To learn how to apply an updated configuration to ++// the VMs in a MIG, see Updating instances in a MIG. ++// ++// - instanceGroupManager: The name of the instance group manager. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { ++ c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.instancegroupmanager = instancegroupmanager ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("PATCH", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.patch" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. 
You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "httpMethod": "PATCH", ++ // "id": "compute.regionInstanceGroupManagers.patch", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "The name of the instance group manager.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "request": { ++ // "$ref": "InstanceGroupManager" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": ++ ++type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchPerInstanceConfigs: Inserts or patches per-instance ++// configurations for the managed instance group. perInstanceConfig.name ++// serves as a key used to distinguish whether to perform insert or ++// patch. ++// ++// - instanceGroupManager: The name of the managed instance group. It ++// should conform to RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request, should conform to ++// RFC1035. 
++func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.recreateInstances": ++ ++type RegionInstanceGroupManagersRecreateInstancesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// RecreateInstances: Flags the specified VM instances in the managed ++// instance group to be immediately recreated. Each instance is ++// recreated using the group's current configuration. This operation is ++// marked as DONE when the flag is set even if the instances have not ++// yet been recreated. You must separately verify the status of each ++// instance by checking its currentAction field; for more information, ++// see Checking the status of managed instances. If the group is part of ++// a backend service that has enabled connection draining, it can take ++// up to 60 seconds after the connection draining duration has elapsed ++// before the VM instance is removed or deleted. You can specify a ++// maximum of 1000 instances with this method per request. ++// ++// - instanceGroupManager: Name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). 
++func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. You can specify a maximum of 1000 instances with this method per request.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.recreateInstances", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "Name of the managed instance group.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagersRecreateRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.resize": ++ ++type RegionInstanceGroupManagersResizeCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Resize: Changes the intended size of the managed instance group. If ++// you increase the size, the group creates new instances using the ++// current instance template. If you decrease the size, the group ++// deletes one or more instances. The resize operation is marked DONE if ++// the resize request is successful. The underlying actions take ++// additional time. You must separately verify the status of the ++// creating or deleting actions with the listmanagedinstances method. If ++// the group is part of a backend service that has enabled connection ++// draining, it can take up to 60 seconds after the connection draining ++// duration has elapsed before the VM instance is removed or deleted. ++// ++// - instanceGroupManager: Name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - size: Number of instances that should exist in this instance group ++// manager. ++func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { ++ c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.urlParams_.Set("size", fmt.Sprint(size)) ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. 
++func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "instanceGroupManager": c.instanceGroupManager, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.resize" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.resize", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "instanceGroupManager", ++ // "size" ++ // ], ++ // "parameters": { ++ // "instanceGroupManager": { ++ // "description": "Name of the managed instance group.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "size": { ++ // "description": "Number of instances that should exist in this instance group manager.", ++ // "format": "int32", ++ // "location": "query", ++ // "minimum": "0", ++ // "required": true, ++ // "type": "integer" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.resizeAdvanced": ++ ++type RegionInstanceGroupManagersResizeAdvancedCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// ResizeAdvanced: Resizes the regional managed instance group with ++// advanced configuration options like disabling creation retries. This ++// is an extended version of the resize method. If you increase the ++// size, the group creates new instances using the current instance ++// template. If you decrease the size, the group deletes one or more ++// instances. The resize operation is marked DONE if the resize request ++// is successful. The underlying actions take additional time. You must ++// separately verify the status of the creating or deleting actions with ++// the get or listmanagedinstances method. 
If the group is part of a ++// backend service that has enabled connection draining, it can take up ++// to 60 seconds after the connection draining duration has elapsed ++// before the VM instance is removed or deleted. ++// ++// - instanceGroupManager: The name of the managed instance group. It ++// must be a string that meets the requirements in RFC1035. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. It must be a ++// string that meets the requirements in RFC1035. ++func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, region string, instanceGroupManager string, regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest) *RegionInstanceGroupManagersResizeAdvancedCall { ++ c := &RegionInstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instanceGroupManager = instanceGroupManager ++ c.regioninstancegroupmanagersresizeadvancedrequest = regioninstancegroupmanagersresizeadvancedrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeAdvancedCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -155936,21 +164032,21 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...goog + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeAdvancedCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -155958,9 +164054,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersresizeadvancedrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -155975,16 +164076,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +-// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +-// error will be non-nil. Any non-2xx status code is an error. Response +-// headers are in either +-// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +-// der or (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { ++// Do executes the "compute.regionInstanceGroupManagers.resizeAdvanced" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156003,7 +164102,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -156015,45 +164114,22 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + } + return ret, nil + // { +- // "description": "Lists all of the per-instance configurations defined for the managed instance group. The orderBy query parameter is not supported.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "description": "Resizes the regional managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the get or listmanagedinstances method. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", ++ // "id": "compute.regionInstanceGroupManagers.resizeAdvanced", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -156062,85 +164138,58 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "request": { ++ // "$ref": "RegionInstanceGroupManagersResizeAdvancedRequest" ++ // }, + // "response": { +- // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } +-} +- +-// method id "compute.regionInstanceGroupManagers.patch": ++// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": + +-type RegionInstanceGroupManagersPatchCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- instancegroupmanager *InstanceGroupManager +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Patch: Updates a managed instance group using the information that +-// you specify in the request. This operation is marked as DONE when the +-// group is patched even if the instances in the group are still in the +-// process of being patched. You must separately verify the status of +-// the individual instances with the listmanagedinstances method. This +-// method supports PATCH semantics and uses the JSON merge patch format +-// and processing rules. If you update your group to specify a new +-// template or instance configuration, it's possible that your intended +-// specification for each VM in the group is different from the current +-// state of that VM. 
To learn how to apply an updated configuration to +-// the VMs in a MIG, see Updating instances in a MIG. ++// SetAutoHealingPolicies: Modifies the autohealing policy for the ++// instances in this managed instance group. [Deprecated] This method is ++// deprecated. Use regionInstanceGroupManagers.patch instead. + // +-// - instanceGroupManager: The name of the instance group manager. ++// - instanceGroupManager: Name of the managed instance group. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { +- c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++ c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.instancegroupmanager = instancegroupmanager ++ c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest + return c + } + +@@ -156155,7 +164204,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156163,7 +164212,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156171,21 +164220,21 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156193,16 +164242,16 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PATCH", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -156215,14 +164264,14 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.patch" call. ++// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156253,10 +164302,11 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. 
To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "httpMethod": "PATCH", +- // "id": "compute.regionInstanceGroupManagers.patch", ++ // "deprecated": true, ++ // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "parameterOrder": [ + // "project", + // "region", +@@ -156264,7 +164314,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the instance group manager.", ++ // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156288,9 +164338,9 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { +- // "$ref": "InstanceGroupManager" ++ // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156303,35 +164353,32 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": ++// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": + +-type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetInstanceTemplateCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// PatchPerInstanceConfigs: Inserts or patches per-instance +-// configurations for the managed instance group. perInstanceConfig.name +-// serves as a key used to distinguish whether to perform insert or +-// patch. ++// SetInstanceTemplate: Sets the instance template to use when creating ++// new instances or recreating instances in this group. Existing ++// instances are not affected. + // +-// - instanceGroupManager: The name of the managed instance group. It +-// should conform to RFC1035. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request, should conform to +-// RFC1035. 
+-func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { +- c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroupManager: The name of the managed instance group. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++ c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq ++ c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + return c + } + +@@ -156346,7 +164393,7 @@ func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project str + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156354,7 +164401,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(reque + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156362,21 +164409,21 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...goo + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156384,14 +164431,14 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt s + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156406,14 +164453,14 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt s + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. ++// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156444,10 +164491,10 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + } + return ret, nil + // { +- // "description": "Inserts or patches per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", ++ // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "region", +@@ -156455,7 +164502,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156468,7 +164515,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156479,9 +164526,9 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { +- // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" ++ // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156494,40 +164541,32 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog + + } + +-// method id "compute.regionInstanceGroupManagers.recreateInstances": ++// method id "compute.regionInstanceGroupManagers.setTargetPools": + +-type RegionInstanceGroupManagersRecreateInstancesCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersSetTargetPoolsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// RecreateInstances: Flags the specified VM instances in the managed +-// instance group to be immediately recreated. Each instance is +-// recreated using the group's current configuration. This operation is +-// marked as DONE when the flag is set even if the instances have not +-// yet been recreated. You must separately verify the status of each +-// instance by checking its currentAction field; for more information, +-// see Checking the status of managed instances. If the group is part of +-// a backend service that has enabled connection draining, it can take +-// up to 60 seconds after the connection draining duration has elapsed +-// before the VM instance is removed or deleted. You can specify a +-// maximum of 1000 instances with this method per request. ++// SetTargetPools: Modifies the target pools to which all new instances ++// in this group are assigned. 
Existing instances in the group are not ++// affected. + // + // - instanceGroupManager: Name of the managed instance group. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { +- c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { ++ c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest ++ c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest + return c + } + +@@ -156542,7 +164581,7 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156550,7 +164589,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId s + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156558,21 +164597,21 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156580,14 +164619,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156602,14 +164641,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. ++// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156640,10 +164679,10 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + } + return ret, nil + // { +- // "description": "Flags the specified VM instances in the managed instance group to be immediately recreated. Each instance is recreated using the group's current configuration. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of each instance by checking its currentAction field; for more information, see Checking the status of managed instances. If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted. 
You can specify a maximum of 1000 instances with this method per request.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.recreateInstances", ++ // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "parameterOrder": [ + // "project", + // "region", +@@ -156675,9 +164714,9 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { +- // "$ref": "RegionInstanceGroupManagersRecreateRequest" ++ // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -156690,40 +164729,208 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. + + } + +-// method id "compute.regionInstanceGroupManagers.resize": ++// method id "compute.regionInstanceGroupManagers.testIamPermissions": + +-type RegionInstanceGroupManagersResizeCall struct { ++type RegionInstanceGroupManagersTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceGroupManagers.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroupManagers.update": ++ ++type RegionInstanceGroupManagersUpdateCall struct { + s *Service + project string + region string + instanceGroupManager string ++ instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header + } + +-// Resize: Changes the intended size of the managed instance group. If +-// you increase the size, the group creates new instances using the +-// current instance template. If you decrease the size, the group +-// deletes one or more instances. The resize operation is marked DONE if +-// the resize request is successful. The underlying actions take +-// additional time. You must separately verify the status of the +-// creating or deleting actions with the listmanagedinstances method. If +-// the group is part of a backend service that has enabled connection +-// draining, it can take up to 60 seconds after the connection draining +-// duration has elapsed before the VM instance is removed or deleted. ++// Update: Updates a managed instance group using the information that ++// you specify in the request. 
This operation is marked as DONE when the ++// group is updated even if the instances in the group have not yet been ++// updated. You must separately verify the status of the individual ++// instances with the listmanagedinstances method. If you update your ++// group to specify a new template or instance configuration, it's ++// possible that your intended specification for each VM in the group is ++// different from the current state of that VM. To learn how to apply an ++// updated configuration to the VMs in a MIG, see Updating instances in ++// a MIG. + // +-// - instanceGroupManager: Name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-// - size: Number of instances that should exist in this instance group +-// manager. +-func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { +- c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroupManager: The name of the instance group manager. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { ++ c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.urlParams_.Set("size", fmt.Sprint(size)) ++ c.instancegroupmanager = instancegroupmanager + return c + } + +@@ -156738,7 +164945,7 @@ func (r *RegionInstanceGroupManagersService) Resize(project string, region strin + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156746,7 +164953,7 @@ func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *Reg + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156754,21 +164961,21 @@ func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Re + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { ++func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156776,11 +164983,16 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } +@@ -156793,14 +165005,14 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.resize" call. ++// Do executes the "compute.regionInstanceGroupManagers.update" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -156831,19 +165043,18 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.resize", ++ // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "httpMethod": "PUT", ++ // "id": "compute.regionInstanceGroupManagers.update", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager", +- // "size" ++ // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -156865,17 +165076,12 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" +- // }, +- // "size": { +- // "description": "Number of instances that should exist in this instance group manager.", +- // "format": "int32", +- // "location": "query", +- // "minimum": "0", +- // "required": true, +- // "type": "integer" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", ++ // "request": { ++ // "$ref": "InstanceGroupManager" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -156887,43 +165093,35 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroupManagers.resizeAdvanced": ++// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": + +-type RegionInstanceGroupManagersResizeAdvancedCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroupManager string ++ regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// ResizeAdvanced: Resizes the regional managed instance group with +-// advanced configuration options like disabling creation retries. This +-// is an extended version of the resize method. If you increase the +-// size, the group creates new instances using the current instance +-// template. If you decrease the size, the group deletes one or more +-// instances. The resize operation is marked DONE if the resize request +-// is successful. The underlying actions take additional time. You must +-// separately verify the status of the creating or deleting actions with +-// the get or listmanagedinstances method. If the group is part of a +-// backend service that has enabled connection draining, it can take up +-// to 60 seconds after the connection draining duration has elapsed +-// before the VM instance is removed or deleted. ++// UpdatePerInstanceConfigs: Inserts or updates per-instance ++// configurations for the managed instance group. perInstanceConfig.name ++// serves as a key used to distinguish whether to perform insert or ++// patch. + // + // - instanceGroupManager: The name of the managed instance group. It +-// must be a string that meets the requirements in RFC1035. ++// should conform to RFC1035. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. It must be a +-// string that meets the requirements in RFC1035. +-func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, region string, instanceGroupManager string, regioninstancegroupmanagersresizeadvancedrequest *RegionInstanceGroupManagersResizeAdvancedRequest) *RegionInstanceGroupManagersResizeAdvancedCall { +- c := &RegionInstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: Name of the region scoping this request, should conform to ++// RFC1035. 
++func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++ c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagersresizeadvancedrequest = regioninstancegroupmanagersresizeadvancedrequest ++ c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq + return c + } + +@@ -156938,7 +165136,7 @@ func (r *RegionInstanceGroupManagersService) ResizeAdvanced(project string, regi + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -156946,7 +165144,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) RequestId(requestId stri + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -156954,21 +165152,21 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeAdvancedCall { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Header() http.Header { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -156976,14 +165174,14 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (* + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersresizeadvancedrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -156998,14 +165196,14 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (* + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.resizeAdvanced" call. ++// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157036,10 +165234,10 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Resizes the regional managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances. The resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the get or listmanagedinstances method. 
If the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.resizeAdvanced", ++ // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "parameterOrder": [ + // "project", + // "region", +@@ -157047,7 +165245,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // ], + // "parameters": { + // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It must be a string that meets the requirements in RFC1035.", ++ // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157060,7 +165258,7 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", ++ // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157071,9 +165269,9 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", ++ // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { +- // "$ref": "RegionInstanceGroupManagersResizeAdvancedRequest" ++ // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // }, + // "response": { + // "$ref": "Operation" +@@ -157086,114 +165284,103 @@ func (c *RegionInstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": ++// method id "compute.regionInstanceGroups.get": + +-type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceGroupsGetCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetAutoHealingPolicies: Modifies the autohealing policy for the +-// instances in this managed instance group. [Deprecated] This method is +-// deprecated. Use regionInstanceGroupManagers.patch instead. ++// Get: Returns the specified instance group resource. + // +-// - instanceGroupManager: Name of the managed instance group. 
++// - instanceGroup: Name of the instance group resource to return. + // - project: Project ID for this request. + // - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +- c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { ++ c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +- c.urlParams_.Set("requestId", requestId) ++ c.instanceGroup = instanceGroup + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { ++func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { ++func (c *RegionInstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// Do executes the "compute.regionInstanceGroups.get" call. ++// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// *InstanceGroup.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157212,7 +165399,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -157224,21 +165411,286 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + } + return ret, nil + // { +- // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Use regionInstanceGroupManagers.patch instead.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", ++ // "description": "Returns the specified instance group resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceGroups.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "instanceGroup": { ++ // "description": "Name of the instance group resource to return.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "response": { ++ // "$ref": "InstanceGroup" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstanceGroups.list": ++ ++type RegionInstanceGroupsListCall struct { ++ s *Service ++ project string ++ region string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of instance group resources contained within ++// the specified region. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { ++ c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. 
If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. 
++func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstanceGroupsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstanceGroups.list" call. ++// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any ++// non-2xx status code is an error. 
Response headers are in either ++// *RegionInstanceGroupList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &RegionInstanceGroupList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of instance group resources contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceGroups.list", ++ // "parameterOrder": [ ++ // "project", ++ // "region" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", +@@ -157253,76 +165705,164 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", +- // "type": "string" ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", +- // "request": { +- // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceGroups", + // "response": { +- // "$ref": "Operation" ++ // "$ref": "RegionInstanceGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type RegionInstanceGroupManagersSetInstanceTemplateCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.regionInstanceGroups.listInstances": ++ ++type RegionInstanceGroupsListInstancesCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetInstanceTemplate: Sets the instance template to use when creating +-// new instances or recreating instances in this group. Existing +-// instances are not affected. ++// ListInstances: Lists the instances in the specified instance group ++// and displays information about the named ports. Depending on the ++// specified options, this method can list all instances or only the ++// instances that are running. The orderBy query parameter is not ++// supported. + // +-// - instanceGroupManager: The name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { +- c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroup: Name of the regional instance group for which we ++// want to list the instances. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. 
++func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { ++ c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest ++ c.instanceGroup = instanceGroup ++ c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest + return c + } + +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { +- c.urlParams_.Set("requestId", requestId) ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. 
For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157330,21 +165870,21 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googlea + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { ++func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { ++func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157352,14 +165892,14 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157367,21 +165907,22 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. 
+-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++// Do executes the "compute.regionInstanceGroups.listInstances" call. ++// Exactly one of *RegionInstanceGroupsListInstances or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *RegionInstanceGroupsListInstances.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157400,7 +165941,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &Operation{ ++ ret := &RegionInstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -157412,22 +165953,45 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + } + return ret, nil + // { +- // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", ++ // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", ++ // "id": "compute.regionInstanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the managed instance group.", ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "instanceGroup": { ++ // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -157441,53 +166005,75 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap + // "required": true, + // "type": "string" + // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", +- // "type": "string" ++ // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { +- // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" ++ // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, + // "response": { +- // "$ref": "Operation" ++ // "$ref": "RegionInstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + + } + +-// method id "compute.regionInstanceGroupManagers.setTargetPools": ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} + +-type RegionInstanceGroupManagersSetTargetPoolsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++// method id "compute.regionInstanceGroups.setNamedPorts": ++ ++type RegionInstanceGroupsSetNamedPortsCall struct { ++ s *Service ++ project string ++ region string ++ instanceGroup string ++ regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetTargetPools: Modifies the target pools to which all new instances +-// in this group are assigned. Existing instances in the group are not +-// affected. ++// SetNamedPorts: Sets the named ports for the specified regional ++// instance group. + // +-// - instanceGroupManager: Name of the managed instance group. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. 
+-func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { +- c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - instanceGroup: The name of the regional instance group where the ++// named ports are updated. ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { ++ c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest ++ c.instanceGroup = instanceGroup ++ c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + return c + } + +@@ -157502,7 +166088,7 @@ func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, regi + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -157510,7 +166096,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId stri + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157518,21 +166104,21 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Fi + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157540,14 +166126,14 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157555,21 +166141,21 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. ++// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157600,18 +166186,18 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + } + return ret, nil + // { +- // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", ++ // "description": "Sets the named ports for the specified regional instance group.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.setTargetPools", ++ // "id": "compute.regionInstanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceGroup" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "Name of the managed instance group.", ++ // "instanceGroup": { ++ // "description": "The name of the regional instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" +@@ -157635,9 +166221,9 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { +- // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" ++ // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -157650,9 +166236,9 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal + + } + +-// method id "compute.regionInstanceGroupManagers.testIamPermissions": ++// method id "compute.regionInstanceGroups.testIamPermissions": + +-type RegionInstanceGroupManagersTestIamPermissionsCall struct { ++type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string +@@ -157669,8 +166255,8 @@ type RegionInstanceGroupManagersTestIamPermissionsCall struct { + // - project: Project ID for this request. + // - region: The name of the region for this request. + // - resource: Name or id of the resource for this request. +-func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { +- c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { ++ c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource +@@ -157681,7 +166267,7 @@ func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -157689,21 +166275,21 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleap + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -157718,7 +166304,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -157733,14 +166319,14 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. ++// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. + // Exactly one of *TestPermissionsResponse or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either + // *TestPermissionsResponse.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -157772,9 +166358,9 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.testIamPermissions", ++ // "id": "compute.regionInstanceGroups.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", +@@ -157803,7 +166389,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", ++ // "path": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, +@@ -157819,230 +166405,29 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi + + } + +-// method id "compute.regionInstanceGroupManagers.update": ++// method id "compute.regionInstanceTemplates.delete": + +-type RegionInstanceGroupManagersUpdateCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- instancegroupmanager *InstanceGroupManager +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesDeleteCall struct { ++ s *Service ++ project string ++ region string ++ instanceTemplate string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Update: Updates a managed instance group using the information that +-// you specify in the request. This operation is marked as DONE when the +-// group is updated even if the instances in the group have not yet been +-// updated. You must separately verify the status of the individual +-// instances with the listmanagedinstances method. If you update your +-// group to specify a new template or instance configuration, it's +-// possible that your intended specification for each VM in the group is +-// different from the current state of that VM. To learn how to apply an +-// updated configuration to the VMs in a MIG, see Updating instances in +-// a MIG. ++// Delete: Deletes the specified instance template. Deleting an instance ++// template is permanent and cannot be undone. + // +-// - instanceGroupManager: The name of the instance group manager. ++// - instanceTemplate: The name of the instance template to delete. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { +- c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.instancegroupmanager = instancegroupmanager +- return c +-} +- +-// RequestId sets the optional parameter "requestId": An optional +-// request ID to identify requests. 
Specify a unique request ID so that +-// if you must retry your request, the server will know to ignore the +-// request if it has already been completed. For example, consider a +-// situation where you make an initial request and the request times +-// out. If you make the request again with the same request ID, the +-// server can check if original operation with the same request ID was +-// received, and if so, will ignore the second request. This prevents +-// clients from accidentally creating duplicate commitments. The request +-// ID must be a valid UUID with the exception that zero UUID is not +-// supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { +- c.urlParams_.Set("requestId", requestId) +- return c +-} +- +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. +-func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("PUT", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.regionInstanceGroupManagers.update" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) 
+- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Operation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method. If you update your group to specify a new template or instance configuration, it's possible that your intended specification for each VM in the group is different from the current state of that VM. To learn how to apply an updated configuration to the VMs in a MIG, see Updating instances in a MIG.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "httpMethod": "PUT", +- // "id": "compute.regionInstanceGroupManagers.update", +- // "parameterOrder": [ +- // "project", +- // "region", +- // "instanceGroupManager" +- // ], +- // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the instance group manager.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "region": { +- // "description": "Name of the region scoping this request.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, +- // "requestId": { +- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", +- // "location": "query", +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", +- // "request": { +- // "$ref": "InstanceGroupManager" +- // }, +- // "response": { +- // "$ref": "Operation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": +- +-type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { +- s *Service +- project string +- region string +- instanceGroupManager string +- regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// UpdatePerInstanceConfigs: Inserts or updates per-instance +-// configurations for the managed instance group. perInstanceConfig.name +-// serves as a key used to distinguish whether to perform insert or +-// patch. +-// +-// - instanceGroupManager: The name of the managed instance group. It +-// should conform to RFC1035. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request, should conform to +-// RFC1035. +-func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +- c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { ++ c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroupManager = instanceGroupManager +- c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq ++ c.instanceTemplate = instanceTemplate + return c + } + +@@ -158057,7 +166442,7 @@ func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project st + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -158065,7 +166450,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requ + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -158073,21 +166458,21 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...go + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { ++func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { ++func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -158095,36 +166480,31 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroupManager": c.instanceGroupManager, ++ "project": c.project, ++ "region": c.region, ++ "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. ++// Do executes the "compute.regionInstanceTemplates.delete" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. 
+-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158155,19 +166535,20 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + } + return ret, nil + // { +- // "description": "Inserts or updates per-instance configurations for the managed instance group. perInstanceConfig.name serves as a key used to distinguish whether to perform insert or patch.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", ++ // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.regionInstanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroupManager" ++ // "instanceTemplate" + // ], + // "parameters": { +- // "instanceGroupManager": { +- // "description": "The name of the managed instance group. It should conform to RFC1035.", ++ // "instanceTemplate": { ++ // "description": "The name of the instance template to delete.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -158179,8 +166560,9 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request, should conform to RFC1035.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -158190,10 +166572,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", +- // "request": { +- // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, +@@ -158205,36 +166584,36 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo + + } + +-// method id "compute.regionInstanceGroups.get": ++// method id "compute.regionInstanceTemplates.get": + +-type RegionInstanceGroupsGetCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesGetCall struct { ++ s *Service ++ project string ++ region string ++ instanceTemplate string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified instance group resource. ++// Get: Returns the specified instance template. 
+ // +-// - instanceGroup: Name of the instance group resource to return. ++// - instanceTemplate: The name of the instance template. + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { +- c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { ++ c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup ++ c.instanceTemplate = instanceTemplate + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -158244,7 +166623,7 @@ func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstan + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -158252,21 +166631,21 @@ func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInsta + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { ++func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsGetCall) Header() http.Header { ++func (c *RegionInstanceTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -158279,7 +166658,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -158287,21 +166666,21 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, ++ "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.get" call. +-// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *InstanceGroup.ServerResponse.Header or (if a response was returned +-// at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.regionInstanceTemplates.get" call. ++// Exactly one of *InstanceTemplate or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstanceTemplate.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { ++func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158320,7 +166699,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceGroup{ ++ ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158332,19 +166711,20 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + } + return ret, nil + // { +- // "description": "Returns the specified instance group resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "description": "Returns the specified instance template.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceGroups.get", ++ // "id": "compute.regionInstanceTemplates.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceGroup" ++ // "instanceTemplate" + // ], + // "parameters": { +- // "instanceGroup": { +- // "description": "Name of the instance group resource to return.", ++ // "instanceTemplate": { ++ // "description": "The name of the instance template.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, +@@ -158356,15 +166736,16 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, +- // "path": 
"projects/{project}/regions/{region}/instanceGroups/{instanceGroup}", ++ // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { +- // "$ref": "InstanceGroup" ++ // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -158375,163 +166756,91 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc + + } + +-// method id "compute.regionInstanceGroups.list": ++// method id "compute.regionInstanceTemplates.insert": + +-type RegionInstanceGroupsListCall struct { +- s *Service +- project string +- region string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesInsertCall struct { ++ s *Service ++ project string ++ region string ++ instancetemplate *InstanceTemplate ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// List: Retrieves the list of instance group resources contained within +-// the specified region. ++// Insert: Creates an instance template in the specified project and ++// region using the global instance template whose URL is included in ++// the request. + // + // - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { +- c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { ++ c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region ++ c.instancetemplate = instancetemplate + return c + } + +-// Filter sets the optional parameter "filter": A filter expression that +-// filters resources listed in the response. Most Compute resources +-// support two types of filter expressions: expressions that support +-// regular expressions and expressions that follow API improvement +-// proposal AIP-160. If you want to use AIP-160, your expression must +-// specify the field name, an operator, and the value that you want to +-// use for filtering. The value must be a string, a number, or a +-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +-// or `:`. For example, if you are filtering Compute Engine instances, +-// you can exclude instances named `example-instance` by specifying +-// `name != example-instance`. The `:` operator can be used with string +-// fields to match substrings. For non-string fields it is equivalent to +-// the `=` operator. The `:*` comparison can be used to test whether a +-// key has been defined. For example, to find all objects with `owner` +-// label use: ``` labels.owner:* ``` You can also filter nested fields. +-// For example, you could specify `scheduling.automaticRestart = false` +-// to include instances only if they are not scheduled for automatic +-// restarts. You can use filtering on nested fields to filter based on +-// resource labels. To filter on multiple expressions, provide each +-// separate expression within parentheses. For example: ``` +-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +-// ``` By default, each expression is an `AND` expression. 
However, you +-// can include `AND` and `OR` expressions explicitly. For example: ``` +-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +-// AND (scheduling.automaticRestart = true) ``` If you want to use a +-// regular expression, use the `eq` (equal) or `ne` (not equal) operator +-// against a single un-parenthesized expression with or without quotes +-// or against multiple parenthesized expressions. Examples: `fieldname +-// eq unquoted literal` `fieldname eq 'single quoted literal'` +-// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +-// (fieldname2 ne "literal")` The literal value is interpreted as a +-// regular expression using Google RE2 library syntax. The literal value +-// must match the entire field. For example, to filter for instances +-// that do not end with name "instance", you would use `name ne +-// .*instance`. +-func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("filter", filter) +- return c +-} +- +-// MaxResults sets the optional parameter "maxResults": The maximum +-// number of results per page that should be returned. If the number of +-// available results is larger than `maxResults`, Compute Engine returns +-// a `nextPageToken` that can be used to get the next page of results in +-// subsequent list requests. Acceptable values are `0` to `500`, +-// inclusive. (Default: `500`) +-func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +- return c +-} +- +-// OrderBy sets the optional parameter "orderBy": Sorts list results by +-// a certain order. By default, results are returned in alphanumerical +-// order based on the resource name. You can also sort results in +-// descending order based on the creation timestamp using +-// `orderBy="creationTimestamp desc". This sorts results based on the +-// `creationTimestamp` field in reverse chronological order (newest +-// result first). Use this to sort resources like operations so that the +-// newest operation is returned first. Currently, only sorting by `name` +-// or `creationTimestamp desc` is supported. +-func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("orderBy", orderBy) +- return c +-} +- +-// PageToken sets the optional parameter "pageToken": Specifies a page +-// token to use. Set `pageToken` to the `nextPageToken` returned by a +-// previous list request to get the next page of results. +-func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("pageToken", pageToken) +- return c +-} +- +-// ReturnPartialSuccess sets the optional parameter +-// "returnPartialSuccess": Opt-in for partial success behavior which +-// provides partial results in case of failure. The default value is +-// false. +-func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { +- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. 
If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { ++func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + +-// IfNoneMatch sets the optional parameter which makes the operation +-// fail if the object's ETag matches the given value. This is useful for +-// getting updates only after the object has changed since the last +-// request. Use googleapi.IsNotModified to check whether the response +-// error from Do is the result of In-None-Match. +-func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +- c.ifNoneMatch_ = entityTag +- return c +-} +- + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { ++func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsListCall) Header() http.Header { ++func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- if c.ifNoneMatch_ != "" { +- reqHeaders.Set("If-None-Match", c.ifNoneMatch_) +- } + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" 
+ c.urlParams_.Encode() +- req, err := http.NewRequest("GET", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } +@@ -158543,14 +166852,14 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.list" call. +-// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { ++// Do executes the "compute.regionInstanceTemplates.insert" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158569,7 +166878,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupList{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158581,38 +166890,15 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + } + return ret, nil + // { +- // "description": "Retrieves the list of instance group resources contained within the specified region.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups", +- // "httpMethod": "GET", +- // "id": "compute.regionInstanceGroups.list", ++ // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstanceTemplates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { +- // "filter": { +- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", +- // "location": "query", +- // "type": "string" +- // }, +- // "maxResults": { +- // "default": "500", +- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +- // "format": "uint32", +- // "location": "query", +- // "minimum": "0", +- // "type": "integer" +- // }, +- // "orderBy": { +- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", +- // "location": "query", +- // "type": "string" +- // }, +- // "pageToken": { +- // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", +- // "location": "query", +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -158621,80 +166907,54 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +- // "returnPartialSuccess": { +- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", +- // "type": "boolean" ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups", ++ // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "request": { ++ // "$ref": "InstanceTemplate" ++ // }, + // "response": { +- // "$ref": "RegionInstanceGroupList" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] +- // } +- +-} +- +-// Pages invokes f for each page of results. +-// A non-nil error returned from f will halt the iteration. +-// The provided context supersedes any context provided to the Context method. 
+-func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { +- c.ctx_ = ctx +- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point +- for { +- x, err := c.Do() +- if err != nil { +- return err +- } +- if err := f(x); err != nil { +- return err +- } +- if x.NextPageToken == "" { +- return nil +- } +- c.PageToken(x.NextPageToken) +- } ++ // } ++ + } + +-// method id "compute.regionInstanceGroups.listInstances": ++// method id "compute.regionInstanceTemplates.list": + +-type RegionInstanceGroupsListInstancesCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstanceTemplatesListCall struct { ++ s *Service ++ project string ++ region string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// ListInstances: Lists the instances in the specified instance group +-// and displays information about the named ports. Depending on the +-// specified options, this method can list all instances or only the +-// instances that are running. The orderBy query parameter is not +-// supported. ++// List: Retrieves a list of instance templates that are contained ++// within the specified project and region. + // +-// - instanceGroup: Name of the regional instance group for which we +-// want to list the instances. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { +- c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the regions for this request. ++func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { ++ c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup +- c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest + return c + } + +@@ -158733,7 +166993,7 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. +-func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -158744,7 +167004,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. 
(Default: `500`) +-func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -158758,7 +167018,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -158766,7 +167026,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -158775,7 +167035,7 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -158783,67 +167043,73 @@ func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnParti + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } + ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. 
+-func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { ++func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { ++func (c *RegionInstanceTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) +- if err != nil { +- return nil, err ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } +- reqHeaders.Set("Content-Type", "application/json") ++ var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.listInstances" call. +-// Exactly one of *RegionInstanceGroupsListInstances or error will be +-// non-nil. Any non-2xx status code is an error. Response headers are in +-// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +-// (if a response was returned at all) in +-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +-// whether the returned error was because http.StatusNotModified was +-// returned. +-func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { ++// Do executes the "compute.regionInstanceTemplates.list" call. ++// Exactly one of *InstanceTemplateList or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *InstanceTemplateList.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -158862,7 +167128,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &RegionInstanceGroupsListInstances{ ++ ret := &InstanceTemplateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -158874,14 +167140,13 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.listInstances", ++ // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", ++ // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstanceTemplates.list", + // "parameterOrder": [ + // "project", +- // "region", +- // "instanceGroup" ++ // "region" + // ], + // "parameters": { + // "filter": { +@@ -158889,12 +167154,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "location": "query", + // "type": "string" + // }, +- // "instanceGroup": { +- // "description": "Name of the regional instance group for which we want to list the instances.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", +@@ -158921,8 +167180,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the regions for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -158932,12 +167192,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", +- // "request": { +- // "$ref": "RegionInstanceGroupsListInstancesRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instanceTemplates", + // "response": { +- // "$ref": "RegionInstanceGroupsListInstances" ++ // "$ref": "InstanceTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -158951,7 +167208,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. 
+ // The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { ++func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -158969,32 +167226,28 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun + } + } + +-// method id "compute.regionInstanceGroups.setNamedPorts": ++// method id "compute.regionInstances.bulkInsert": + +-type RegionInstanceGroupsSetNamedPortsCall struct { +- s *Service +- project string +- region string +- instanceGroup string +- regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstancesBulkInsertCall struct { ++ s *Service ++ project string ++ region string ++ bulkinsertinstanceresource *BulkInsertInstanceResource ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// SetNamedPorts: Sets the named ports for the specified regional +-// instance group. ++// BulkInsert: Creates multiple instances in a given region. Count ++// specifies the number of instances to create. + // +-// - instanceGroup: The name of the regional instance group where the +-// named ports are updated. +-// - project: Project ID for this request. +-// - region: Name of the region scoping this request. +-func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { +- c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { ++ c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceGroup = instanceGroup +- c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest ++ c.bulkinsertinstanceresource = bulkinsertinstanceresource + return c + } + +@@ -159009,7 +167262,7 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159017,7 +167270,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *Reg + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159025,21 +167278,21 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Re + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { ++func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { ++func (c *RegionInstancesBulkInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159047,14 +167300,14 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -159062,21 +167315,20 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceGroup": c.instanceGroup, ++ "project": c.project, ++ "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. ++// Do executes the "compute.regionInstances.bulkInsert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159107,22 +167359,15 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Sets the named ports for the specified regional instance group.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", ++ // "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", ++ // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.setNamedPorts", ++ // "id": "compute.regionInstances.bulkInsert", + // "parameterOrder": [ + // "project", +- // "region", +- // "instanceGroup" ++ // "region" + // ], + // "parameters": { +- // "instanceGroup": { +- // "description": "The name of the regional instance group where the named ports are updated.", +- // "location": "path", +- // "required": true, +- // "type": "string" +- // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -159131,8 +167376,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // }, + // "region": { +- // "description": "Name of the region scoping this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, +@@ -159142,9 +167388,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", ++ // "path": "projects/{project}/regions/{region}/instances/bulkInsert", + // "request": { +- // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" ++ // "$ref": "BulkInsertInstanceResource" + // }, + // "response": { + // "$ref": "Operation" +@@ -159157,38 +167403,56 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) + + } + +-// method id "compute.regionInstanceGroups.testIamPermissions": ++// method id "compute.regionInstantSnapshots.delete": + +-type RegionInstanceGroupsTestIamPermissionsCall struct { +- s *Service +- project string +- region string +- resource string +- testpermissionsrequest *TestPermissionsRequest +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsDeleteCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// TestIamPermissions: Returns permissions that a caller has on the +-// specified resource. ++// Delete: Deletes the specified InstantSnapshot resource. Keep in mind ++// that deleting a single instantSnapshot might not necessarily delete ++// all the data on that instantSnapshot. If any data on the ++// instantSnapshot that is marked for deletion is needed for subsequent ++// instantSnapshots, the data will be moved to the next corresponding ++// instantSnapshot. For more information, see Deleting instantSnapshots. + // ++// - instantSnapshot: Name of the InstantSnapshot resource to delete. + // - project: Project ID for this request. + // - region: The name of the region for this request. 
+-// - resource: Name or id of the resource for this request. +-func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { +- c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstantSnapshotsService) Delete(project string, region string, instantSnapshot string) *RegionInstantSnapshotsDeleteCall { ++ c := &RegionInstantSnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.resource = resource +- c.testpermissionsrequest = testpermissionsrequest ++ c.instantSnapshot = instantSnapshot ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *RegionInstantSnapshotsDeleteCall) RequestId(requestId string) *RegionInstantSnapshotsDeleteCall { ++ c.urlParams_.Set("requestId", requestId) + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { ++func (c *RegionInstantSnapshotsDeleteCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159196,21 +167460,21 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { ++func (c *RegionInstantSnapshotsDeleteCall) Context(ctx context.Context) *RegionInstantSnapshotsDeleteCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. 
+-func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { ++func (c *RegionInstantSnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159218,36 +167482,31 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*htt + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) +- if err != nil { +- return nil, err +- } +- reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) ++ req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "resource": c.resource, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *TestPermissionsResponse.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use +-// googleapi.IsNotModified to check whether the returned error was +-// because http.StatusNotModified was returned. +-func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++// Do executes the "compute.regionInstantSnapshots.delete" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionInstantSnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159266,7 +167525,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &TestPermissionsResponse{ ++ ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -159278,16 +167537,23 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + } + return ret, nil + // { +- // "description": "Returns permissions that a caller has on the specified resource.", +- // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", +- // "httpMethod": "POST", +- // "id": "compute.regionInstanceGroups.testIamPermissions", ++ // "description": "Deletes the specified InstantSnapshot resource. Keep in mind that deleting a single instantSnapshot might not necessarily delete all the data on that instantSnapshot. If any data on the instantSnapshot that is marked for deletion is needed for subsequent instantSnapshots, the data will be moved to the next corresponding instantSnapshot. For more information, see Deleting instantSnapshots.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", ++ // "httpMethod": "DELETE", ++ // "id": "compute.regionInstantSnapshots.delete", + // "parameterOrder": [ + // "project", + // "region", +- // "resource" ++ // "instantSnapshot" + // ], + // "parameters": { ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to delete.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", +@@ -159302,53 +167568,49 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp + // "required": true, + // "type": "string" + // }, +- // "resource": { +- // "description": "Name or id of the resource for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", +- // "request": { +- // "$ref": "TestPermissionsRequest" +- // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "TestPermissionsResponse" ++ // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute", +- // "https://www.googleapis.com/auth/compute.readonly" ++ // "https://www.googleapis.com/auth/compute" + // ] + // } + + } + +-// method id "compute.regionInstanceTemplates.delete": ++// method id "compute.regionInstantSnapshots.export": + +-type RegionInstanceTemplatesDeleteCall struct { +- s *Service +- project string +- region string +- instanceTemplate string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsExportCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ regioninstantsnapshotsexportrequest *RegionInstantSnapshotsExportRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// Delete: Deletes the specified instance template. Deleting an instance +-// template is permanent and cannot be undone. ++// Export: Export the changed blocks between two instant snapshots to a ++// customer's bucket in the user specified format. + // +-// - instanceTemplate: The name of the instance template to delete. ++// - instantSnapshot: Name of the instant snapshot to export. + // - project: Project ID for this request. +-// - region: The name of the region for this request. +-func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { +- c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the zone for this request. ++func (r *RegionInstantSnapshotsService) Export(project string, region string, instantSnapshot string, regioninstantsnapshotsexportrequest *RegionInstantSnapshotsExportRequest) *RegionInstantSnapshotsExportCall { ++ c := &RegionInstantSnapshotsExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceTemplate = instanceTemplate ++ c.instantSnapshot = instantSnapshot ++ c.regioninstantsnapshotsexportrequest = regioninstantsnapshotsexportrequest + return c + } + +@@ -159363,7 +167625,7 @@ func (r *RegionInstanceTemplatesService) Delete(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) RequestId(requestId string) *RegionInstantSnapshotsExportCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159371,7 +167633,7 @@ func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionI + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159379,21 +167641,21 @@ func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *Region + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { ++func (c *RegionInstantSnapshotsExportCall) Context(ctx context.Context) *RegionInstantSnapshotsExportCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { ++func (c *RegionInstantSnapshotsExportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsExportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159401,31 +167663,36 @@ func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstantsnapshotsexportrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export") + urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("DELETE", urls, body) ++ req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceTemplate": c.instanceTemplate, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.delete" call. ++// Do executes the "compute.regionInstantSnapshots.export" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsExportCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159456,18 +167723,18 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", +- // "httpMethod": "DELETE", +- // "id": "compute.regionInstanceTemplates.delete", ++ // "description": "Export the changed blocks between two instant snapshots to a customer's bucket in the user specified format.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.export", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceTemplate" ++ // "instantSnapshot" + // ], + // "parameters": { +- // "instanceTemplate": { +- // "description": "The name of the instance template to delete.", ++ // "instantSnapshot": { ++ // "description": "Name of the instant snapshot to export.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -159481,7 +167748,7 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -159493,7 +167760,10 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}/export", ++ // "request": { ++ // "$ref": "RegionInstantSnapshotsExportRequest" ++ // }, + // "response": { + // "$ref": "Operation" + // }, +@@ -159505,36 +167775,37 @@ func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.regionInstanceTemplates.get": ++// method id "compute.regionInstantSnapshots.get": + +-type RegionInstanceTemplatesGetCall struct { +- s *Service +- project string +- region string +- instanceTemplate string +- urlParams_ gensupport.URLParams +- ifNoneMatch_ string +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsGetCall struct { ++ s *Service ++ project string ++ region string ++ instantSnapshot string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Get: Returns the specified instance template. ++// Get: Returns the specified InstantSnapshot resource in the specified ++// region. + // +-// - instanceTemplate: The name of the instance template. ++// - instantSnapshot: Name of the InstantSnapshot resource to return. + // - project: Project ID for this request. + // - region: The name of the region for this request. 
+-func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { +- c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++func (r *RegionInstantSnapshotsService) Get(project string, region string, instantSnapshot string) *RegionInstantSnapshotsGetCall { ++ c := &RegionInstantSnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instanceTemplate = instanceTemplate ++ c.instantSnapshot = instantSnapshot + return c + } + + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159544,7 +167815,7 @@ func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionIns + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. +-func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -159552,21 +167823,21 @@ func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionIn + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall { ++func (c *RegionInstantSnapshotsGetCall) Context(ctx context.Context) *RegionInstantSnapshotsGetCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesGetCall) Header() http.Header { ++func (c *RegionInstantSnapshotsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159579,7 +167850,7 @@ func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -159587,21 +167858,21 @@ func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, +- "instanceTemplate": c.instanceTemplate, ++ "project": c.project, ++ "region": c.region, ++ "instantSnapshot": c.instantSnapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.get" call. +-// Exactly one of *InstanceTemplate or error will be non-nil. Any +-// non-2xx status code is an error. Response headers are in either +-// *InstanceTemplate.ServerResponse.Header or (if a response was +-// returned at all) in error.(*googleapi.Error).Header. Use ++// Do executes the "compute.regionInstantSnapshots.get" call. ++// Exactly one of *InstantSnapshot or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *InstantSnapshot.ServerResponse.Header or (if a response was returned ++// at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { ++func (c *RegionInstantSnapshotsGetCall) Do(opts ...googleapi.CallOption) (*InstantSnapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159620,7 +167891,7 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceTemplate{ ++ ret := &InstantSnapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -159632,18 +167903,18 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + } + return ret, nil + // { +- // "description": "Returns the specified instance template.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "description": "Returns the specified InstantSnapshot resource in the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceTemplates.get", ++ // "id": "compute.regionInstantSnapshots.get", + // "parameterOrder": [ + // "project", + // "region", +- // "instanceTemplate" ++ // "instantSnapshot" + // ], + // "parameters": { +- // "instanceTemplate": { +- // "description": "The name of the instance template.", ++ // "instantSnapshot": { ++ // "description": "Name of the InstantSnapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, +@@ -159664,9 +167935,9 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{instantSnapshot}", + // "response": { +- // "$ref": "InstanceTemplate" ++ // "$ref": "InstantSnapshot" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", +@@ -159677,29 +167948,213 @@ func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*Inst + + } + +-// method id "compute.regionInstanceTemplates.insert": ++// method id "compute.regionInstantSnapshots.getIamPolicy": + +-type RegionInstanceTemplatesInsertCall struct { +- s *Service +- project string +- region string +- instancetemplate *InstanceTemplate +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsGetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header + } + +-// Insert: Creates an instance template in the specified project and +-// region using the global instance template whose URL is included in +-// the request. ++// GetIamPolicy: Gets the access control policy for a resource. May be ++// empty if no such policy or resource exists. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. +-func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { +- c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) GetIamPolicy(project string, region string, resource string) *RegionInstantSnapshotsGetIamPolicyCall { ++ c := &RegionInstantSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.instancetemplate = instancetemplate ++ c.resource = resource ++ return c ++} ++ ++// OptionsRequestedPolicyVersion sets the optional parameter ++// "optionsRequestedPolicyVersion": Requested IAM Policy version. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Context(ctx context.Context) *RegionInstantSnapshotsGetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstantSnapshotsGetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.getIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. ++func (c *RegionInstantSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ // "httpMethod": "GET", ++ // "id": "compute.regionInstantSnapshots.getIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "optionsRequestedPolicyVersion": { ++ // "description": "Requested IAM Policy version.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/getIamPolicy", ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstantSnapshots.insert": ++ ++type RegionInstantSnapshotsInsertCall struct { ++ s *Service ++ project string ++ region string ++ instantsnapshot *InstantSnapshot ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Insert: Creates an instant snapshot in the specified region. ++// ++// - project: Project ID for this request. ++// - region: Name of the region for this request. ++func (r *RegionInstantSnapshotsService) Insert(project string, region string, instantsnapshot *InstantSnapshot) *RegionInstantSnapshotsInsertCall { ++ c := &RegionInstantSnapshotsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.instantsnapshot = instantsnapshot + return c + } + +@@ -159714,7 +168169,7 @@ func (r *RegionInstanceTemplatesService) Insert(project string, region string, i + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) RequestId(requestId string) *RegionInstantSnapshotsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -159722,7 +168177,7 @@ func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionI + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159730,21 +168185,21 @@ func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *Region + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { ++func (c *RegionInstantSnapshotsInsertCall) Context(ctx context.Context) *RegionInstantSnapshotsInsertCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { ++func (c *RegionInstantSnapshotsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -159752,14 +168207,14 @@ func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Respons + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instantsnapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -159773,14 +168228,14 @@ func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Respons + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.insert" call. ++// Do executes the "compute.regionInstantSnapshots.insert" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -159811,10 +168266,10 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + } + return ret, nil + // { +- // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "description": "Creates an instant snapshot in the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + // "httpMethod": "POST", +- // "id": "compute.regionInstanceTemplates.insert", ++ // "id": "compute.regionInstantSnapshots.insert", + // "parameterOrder": [ + // "project", + // "region" +@@ -159828,7 +168283,7 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -159840,9 +168295,9 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots", + // "request": { +- // "$ref": "InstanceTemplate" ++ // "$ref": "InstantSnapshot" + // }, + // "response": { + // "$ref": "Operation" +@@ -159855,9 +168310,9 @@ func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*O + + } + +-// method id "compute.regionInstanceTemplates.list": ++// method id "compute.regionInstantSnapshots.list": + +-type RegionInstanceTemplatesListCall struct { ++type RegionInstantSnapshotsListCall struct { + s *Service + project string + region string +@@ -159867,13 +168322,13 @@ type RegionInstanceTemplatesListCall struct { + header_ http.Header + } + +-// List: Retrieves a list of instance templates that are contained +-// within the specified project and region. ++// List: Retrieves the list of InstantSnapshot resources contained ++// within the specified region. + // + // - project: Project ID for this request. +-// - region: The name of the regions for this request. +-func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { +- c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - region: The name of the region for this request. ++func (r *RegionInstantSnapshotsService) List(project string, region string) *RegionInstantSnapshotsListCall { ++ c := &RegionInstantSnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +@@ -159914,7 +168369,7 @@ func (r *RegionInstanceTemplatesService) List(project string, region string) *Re + // must match the entire field. For example, to filter for instances + // that do not end with name "instance", you would use `name ne + // .*instance`. 
+-func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Filter(filter string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c + } +@@ -159925,7 +168380,7 @@ func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceT + // a `nextPageToken` that can be used to get the next page of results in + // subsequent list requests. Acceptable values are `0` to `500`, + // inclusive. (Default: `500`) +-func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) MaxResults(maxResults int64) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c + } +@@ -159939,7 +168394,7 @@ func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionIn + // result first). Use this to sort resources like operations so that the + // newest operation is returned first. Currently, only sorting by `name` + // or `creationTimestamp desc` is supported. +-func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) OrderBy(orderBy string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c + } +@@ -159947,7 +168402,7 @@ func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanc + // PageToken sets the optional parameter "pageToken": Specifies a page + // token to use. Set `pageToken` to the `nextPageToken` returned by a + // previous list request to get the next page of results. +-func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) PageToken(pageToken string) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c + } +@@ -159956,7 +168411,7 @@ func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionIns + // "returnPartialSuccess": Opt-in for partial success behavior which + // provides partial results in case of failure. The default value is + // false. +-func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c + } +@@ -159964,7 +168419,7 @@ func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSucc + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -159974,7 +168429,7 @@ func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionIn + // getting updates only after the object has changed since the last + // request. Use googleapi.IsNotModified to check whether the response + // error from Do is the result of In-None-Match. 
+-func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) IfNoneMatch(entityTag string) *RegionInstantSnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c + } +@@ -159982,21 +168437,21 @@ func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionI + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall { ++func (c *RegionInstantSnapshotsListCall) Context(ctx context.Context) *RegionInstantSnapshotsListCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstanceTemplatesListCall) Header() http.Header { ++func (c *RegionInstantSnapshotsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -160009,7 +168464,7 @@ func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { +@@ -160023,14 +168478,14 @@ func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstanceTemplates.list" call. +-// Exactly one of *InstanceTemplateList or error will be non-nil. Any ++// Do executes the "compute.regionInstantSnapshots.list" call. ++// Exactly one of *InstantSnapshotList or error will be non-nil. Any + // non-2xx status code is an error. Response headers are in either +-// *InstanceTemplateList.ServerResponse.Header or (if a response was ++// *InstantSnapshotList.ServerResponse.Header or (if a response was + // returned at all) in error.(*googleapi.Error).Header. Use + // googleapi.IsNotModified to check whether the returned error was + // because http.StatusNotModified was returned. +-func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { ++func (c *RegionInstantSnapshotsListCall) Do(opts ...googleapi.CallOption) (*InstantSnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -160049,7 +168504,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } +- ret := &InstanceTemplateList{ ++ ret := &InstantSnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, +@@ -160061,10 +168516,10 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + } + return ret, nil + // { +- // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", +- // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", ++ // "description": "Retrieves the list of InstantSnapshot resources contained within the specified region.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots", + // "httpMethod": "GET", +- // "id": "compute.regionInstanceTemplates.list", ++ // "id": "compute.regionInstantSnapshots.list", + // "parameterOrder": [ + // "project", + // "region" +@@ -160101,7 +168556,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // "type": "string" + // }, + // "region": { +- // "description": "The name of the regions for this request.", ++ // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -160113,9 +168568,9 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // "type": "boolean" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instanceTemplates", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots", + // "response": { +- // "$ref": "InstanceTemplateList" ++ // "$ref": "InstantSnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", +@@ -160129,7 +168584,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins + // Pages invokes f for each page of results. + // A non-nil error returned from f will halt the iteration. + // The provided context supersedes any context provided to the Context method. +-func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { ++func (c *RegionInstantSnapshotsListCall) Pages(ctx context.Context, f func(*InstantSnapshotList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { +@@ -160147,28 +168602,200 @@ func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*Ins + } + } + +-// method id "compute.regionInstances.bulkInsert": ++// method id "compute.regionInstantSnapshots.setIamPolicy": + +-type RegionInstancesBulkInsertCall struct { +- s *Service +- project string +- region string +- bulkinsertinstanceresource *BulkInsertInstanceResource +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header ++type RegionInstantSnapshotsSetIamPolicyCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetpolicyrequest *RegionSetPolicyRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header + } + +-// BulkInsert: Creates multiple instances in a given region. Count +-// specifies the number of instances to create. 
++// SetIamPolicy: Sets the access control policy on the specified ++// resource. Replaces any existing policy. + // + // - project: Project ID for this request. + // - region: The name of the region for this request. +-func (r *RegionInstancesService) BulkInsert(project string, region string, bulkinsertinstanceresource *BulkInsertInstanceResource) *RegionInstancesBulkInsertCall { +- c := &RegionInstancesBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionInstantSnapshotsSetIamPolicyCall { ++ c := &RegionInstantSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region +- c.bulkinsertinstanceresource = bulkinsertinstanceresource ++ c.resource = resource ++ c.regionsetpolicyrequest = regionsetpolicyrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsSetIamPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Context(ctx context.Context) *RegionInstantSnapshotsSetIamPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionInstantSnapshotsSetIamPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.setIamPolicy" call. ++// Exactly one of *Policy or error will be non-nil. Any non-2xx status ++// code is an error. Response headers are in either ++// *Policy.ServerResponse.Header or (if a response was returned at all) ++// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to ++// check whether the returned error was because http.StatusNotModified ++// was returned. 
++func (c *RegionInstantSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Policy{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.setIamPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setIamPolicy", ++ // "request": { ++ // "$ref": "RegionSetPolicyRequest" ++ // }, ++ // "response": { ++ // "$ref": "Policy" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionInstantSnapshots.setLabels": ++ ++type RegionInstantSnapshotsSetLabelsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ regionsetlabelsrequest *RegionSetLabelsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetLabels: Sets the labels on a instantSnapshot in the given region. ++// To learn more about labels, read the Labeling Resources ++// documentation. ++// ++// - project: Project ID for this request. ++// - region: The region for this request. ++// - resource: Name or id of the resource for this request. 
++func (r *RegionInstantSnapshotsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionInstantSnapshotsSetLabelsCall { ++ c := &RegionInstantSnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.regionsetlabelsrequest = regionsetlabelsrequest + return c + } + +@@ -160183,7 +168810,7 @@ func (r *RegionInstancesService) BulkInsert(project string, region string, bulki + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) RequestId(requestId string) *RegionInstantSnapshotsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -160191,7 +168818,7 @@ func (c *RegionInstancesBulkInsertCall) RequestId(requestId string) *RegionInsta + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. +-func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -160199,21 +168826,21 @@ func (c *RegionInstancesBulkInsertCall) Fields(s ...googleapi.Field) *RegionInst + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *RegionInstancesBulkInsertCall) Context(ctx context.Context) *RegionInstancesBulkInsertCall { ++func (c *RegionInstantSnapshotsSetLabelsCall) Context(ctx context.Context) *RegionInstantSnapshotsSetLabelsCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *RegionInstancesBulkInsertCall) Header() http.Header { ++func (c *RegionInstantSnapshotsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, error) { ++func (c *RegionInstantSnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -160221,14 +168848,14 @@ func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, e + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil +- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertinstanceresource) ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instances/bulkInsert") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -160236,20 +168863,21 @@ func (c *RegionInstancesBulkInsertCall) doRequest(alt string) (*http.Response, e + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "region": c.region, ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.regionInstances.bulkInsert" call. ++// Do executes the "compute.regionInstantSnapshots.setLabels" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *RegionInstantSnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -160280,13 +168908,14 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + } + return ret, nil + // { +- // "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", +- // "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", ++ // "description": "Sets the labels on a instantSnapshot in the given region. To learn more about labels, read the Labeling Resources documentation.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", + // "httpMethod": "POST", +- // "id": "compute.regionInstances.bulkInsert", ++ // "id": "compute.regionInstantSnapshots.setLabels", + // "parameterOrder": [ + // "project", +- // "region" ++ // "region", ++ // "resource" + // ], + // "parameters": { + // "project": { +@@ -160297,7 +168926,7 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + // "type": "string" + // }, + // "region": { +- // "description": "The name of the region for this request.", ++ // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, +@@ -160307,11 +168936,18 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" + // } + // }, +- // "path": "projects/{project}/regions/{region}/instances/bulkInsert", ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/setLabels", + // "request": { +- // "$ref": "BulkInsertInstanceResource" ++ // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" +@@ -160324,6 +168960,175 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera + + } + ++// method id "compute.regionInstantSnapshots.testIamPermissions": ++ ++type RegionInstantSnapshotsTestIamPermissionsCall struct { ++ s *Service ++ project string ++ region string ++ resource string ++ testpermissionsrequest *TestPermissionsRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// TestIamPermissions: Returns permissions that a caller has on the ++// specified resource. ++// ++// - project: Project ID for this request. ++// - region: The name of the region for this request. ++// - resource: Name or id of the resource for this request. ++func (r *RegionInstantSnapshotsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c := &RegionInstantSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.resource = resource ++ c.testpermissionsrequest = testpermissionsrequest ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstantSnapshotsTestIamPermissionsCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "resource": c.resource, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionInstantSnapshots.testIamPermissions" call. ++// Exactly one of *TestPermissionsResponse or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *TestPermissionsResponse.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *RegionInstantSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &TestPermissionsResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns permissions that a caller has on the specified resource.", ++ // "flatPath": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ // "httpMethod": "POST", ++ // "id": "compute.regionInstantSnapshots.testIamPermissions", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "resource" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "The name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "resource": { ++ // "description": "Name or id of the resource for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/instantSnapshots/{resource}/testIamPermissions", ++ // "request": { ++ // "$ref": "TestPermissionsRequest" ++ // }, ++ // "response": { ++ // "$ref": "TestPermissionsResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionNetworkEndpointGroups.delete": + + type RegionNetworkEndpointGroupsDeleteCall struct { +@@ -166167,6 +174972,185 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.regionSecurityPolicies.addRule": ++ ++type RegionSecurityPoliciesAddRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ securitypolicyrule *SecurityPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// AddRule: Inserts a rule into a security policy. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
++func (r *RegionSecurityPoliciesService) AddRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesAddRuleCall { ++ c := &RegionSecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ c.securitypolicyrule = securitypolicyrule ++ return c ++} ++ ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *RegionSecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesAddRuleCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesAddRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesAddRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesAddRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesAddRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.addRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Inserts a rule into a security policy.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.addRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", ++ // "request": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSecurityPolicies.delete": + + type RegionSecurityPoliciesDeleteCall struct { +@@ -166518,6 +175502,192 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur + + } + ++// method id "compute.regionSecurityPolicies.getRule": ++ ++type RegionSecurityPoliciesGetRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// GetRule: Gets a rule at the specified priority. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to which the queried ++// rule belongs. 
++func (r *RegionSecurityPoliciesService) GetRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetRuleCall { ++ c := &RegionSecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to get from the security policy. ++func (c *RegionSecurityPoliciesGetRuleCall) Priority(priority int64) *RegionSecurityPoliciesGetRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *RegionSecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetRuleCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesGetRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesGetRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesGetRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.getRule" call. ++// Exactly one of *SecurityPolicyRule or error will be non-nil. Any ++// non-2xx status code is an error. Response headers are in either ++// *SecurityPolicyRule.ServerResponse.Header or (if a response was ++// returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. 
++func (c *RegionSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &SecurityPolicyRule{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Gets a rule at the specified priority.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ // "httpMethod": "GET", ++ // "id": "compute.regionSecurityPolicies.getRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to get from the security policy.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to which the queried rule belongs.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", ++ // "response": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSecurityPolicies.insert": + + type RegionSecurityPoliciesInsertCall struct { +@@ -167191,6 +176361,370 @@ func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Ope + + } + ++// method id "compute.regionSecurityPolicies.patchRule": ++ ++type RegionSecurityPoliciesPatchRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ securitypolicyrule *SecurityPolicyRule ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// PatchRule: Patches a rule at the specified priority. To clear fields ++// in the rule, leave the fields empty and specify them in the ++// updateMask. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
++func (r *RegionSecurityPoliciesService) PatchRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesPatchRuleCall { ++ c := &RegionSecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ c.securitypolicyrule = securitypolicyrule ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to patch. ++func (c *RegionSecurityPoliciesPatchRuleCall) Priority(priority int64) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// ValidateOnly sets the optional parameter "validateOnly": If true, the ++// request will not be committed. ++func (c *RegionSecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesPatchRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesPatchRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.patchRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. 
++func (c *RegionSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Patches a rule at the specified priority. To clear fields in the rule, leave the fields empty and specify them in the updateMask.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.patchRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to patch.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "validateOnly": { ++ // "description": "If true, the request will not be committed.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", ++ // "request": { ++ // "$ref": "SecurityPolicyRule" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.regionSecurityPolicies.removeRule": ++ ++type RegionSecurityPoliciesRemoveRuleCall struct { ++ s *Service ++ project string ++ region string ++ securityPolicy string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// RemoveRule: Deletes a rule at the specified priority. ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - securityPolicy: Name of the security policy to update. 
++func (r *RegionSecurityPoliciesService) RemoveRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesRemoveRuleCall { ++ c := &RegionSecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.securityPolicy = securityPolicy ++ return c ++} ++ ++// Priority sets the optional parameter "priority": The priority of the ++// rule to remove from the security policy. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Priority(priority int64) *RegionSecurityPoliciesRemoveRuleCall { ++ c.urlParams_.Set("priority", fmt.Sprint(priority)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesRemoveRuleCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesRemoveRuleCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *RegionSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "securityPolicy": c.securityPolicy, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.regionSecurityPolicies.removeRule" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *RegionSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Deletes a rule at the specified priority.", ++ // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ // "httpMethod": "POST", ++ // "id": "compute.regionSecurityPolicies.removeRule", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "securityPolicy" ++ // ], ++ // "parameters": { ++ // "priority": { ++ // "description": "The priority of the rule to remove from the security policy.", ++ // "format": "int32", ++ // "location": "query", ++ // "type": "integer" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "securityPolicy": { ++ // "description": "Name of the security policy to update.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.regionSslCertificates.delete": + + type RegionSslCertificatesDeleteCall struct { +@@ -199923,6 +209457,196 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta + } + } + ++// method id "compute.targetInstances.setSecurityPolicy": ++ ++type TargetInstancesSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ zone string ++ targetInstance string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target instance. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - targetInstance: Name of the TargetInstance resource to which the ++// security policy should be set. The name should conform to RFC1035. ++// - zone: Name of the zone scoping this request. 
++func (r *TargetInstancesService) SetSecurityPolicy(project string, zone string, targetInstance string, securitypolicyreference *SecurityPolicyReference) *TargetInstancesSetSecurityPolicyCall { ++ c := &TargetInstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.targetInstance = targetInstance ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetInstancesSetSecurityPolicyCall) RequestId(requestId string) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetInstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetInstancesSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *TargetInstancesSetSecurityPolicyCall) Context(ctx context.Context) *TargetInstancesSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetInstancesSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetInstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "targetInstance": c.targetInstance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetInstances.setSecurityPolicy" call. 
++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetInstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetInstances.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "targetInstance" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetInstance": { ++ // "description": "Name of the TargetInstance resource to which the security policy should be set. 
The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "Name of the zone scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetInstances.testIamPermissions": + + type TargetInstancesTestIamPermissionsCall struct { +@@ -202333,6 +212057,196 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, + + } + ++// method id "compute.targetPools.setSecurityPolicy": ++ ++type TargetPoolsSetSecurityPolicyCall struct { ++ s *Service ++ project string ++ region string ++ targetPool string ++ securitypolicyreference *SecurityPolicyReference ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SetSecurityPolicy: Sets the Google Cloud Armor security policy for ++// the specified target pool. For more information, see Google Cloud ++// Armor Overview ++// ++// - project: Project ID for this request. ++// - region: Name of the region scoping this request. ++// - targetPool: Name of the TargetPool resource to which the security ++// policy should be set. The name should conform to RFC1035. ++func (r *TargetPoolsService) SetSecurityPolicy(project string, region string, targetPool string, securitypolicyreference *SecurityPolicyReference) *TargetPoolsSetSecurityPolicyCall { ++ c := &TargetPoolsSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.targetPool = targetPool ++ c.securitypolicyreference = securitypolicyreference ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *TargetPoolsSetSecurityPolicyCall) RequestId(requestId string) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *TargetPoolsSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetPoolsSetSecurityPolicyCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *TargetPoolsSetSecurityPolicyCall) Context(ctx context.Context) *TargetPoolsSetSecurityPolicyCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *TargetPoolsSetSecurityPolicyCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *TargetPoolsSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "targetPool": c.targetPool, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.targetPools.setSecurityPolicy" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *TargetPoolsSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", ++ // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "httpMethod": "POST", ++ // "id": "compute.targetPools.setSecurityPolicy", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "targetPool" ++ // ], ++ // "parameters": { ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "targetPool": { ++ // "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", ++ // "location": "path", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", ++ // "request": { ++ // "$ref": "SecurityPolicyReference" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.targetPools.testIamPermissions": + + type TargetPoolsTestIamPermissionsCall struct { +diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json +index b380f1a7034..4ef0af54620 100644 +--- a/vendor/google.golang.org/api/compute/v1/compute-api.json ++++ b/vendor/google.golang.org/api/compute/v1/compute-api.json +@@ -550,6 +550,56 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource.", ++ "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.addresses.move", ++ "parameterOrder": [ ++ "project", ++ "region", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ 
"region": { ++ "description": "Name of the region for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ "request": { ++ "$ref": "RegionAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", +@@ -4497,6 +4547,48 @@ + "https://www.googleapis.com/auth/compute.readonly" + ] + }, ++ "move": { ++ "description": "Moves the specified address resource from one project to another project.", ++ "flatPath": "projects/{project}/global/addresses/{address}/move", ++ "httpMethod": "POST", ++ "id": "compute.globalAddresses.move", ++ "parameterOrder": [ ++ "project", ++ "address" ++ ], ++ "parameters": { ++ "address": { ++ "description": "Name of the address resource to move.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Source project ID which the Address is moved from.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/addresses/{address}/move", ++ "request": { ++ "$ref": "GlobalAddressesMoveRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", +@@ -10647,6 +10739,11 @@ + "required": true, + "type": "string" + }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", +@@ -11669,6 +11766,100 @@ + } + } + }, ++ "interconnectRemoteLocations": { ++ "methods": { ++ "get": { ++ "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.get", ++ "parameterOrder": [ ++ "project", ++ "interconnectRemoteLocation" ++ ], ++ "parameters": { ++ "interconnectRemoteLocation": { ++ "description": "Name of the interconnect remote location to return.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ "response": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ }, ++ "list": { ++ "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ "httpMethod": "GET", ++ "id": "compute.interconnectRemoteLocations.list", ++ "parameterOrder": [ ++ "project" ++ ], ++ "parameters": { ++ "filter": { ++ "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ "location": "query", ++ "type": "string" ++ }, ++ "maxResults": { ++ "default": "500", ++ "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ "format": "uint32", ++ "location": "query", ++ "minimum": "0", ++ "type": "integer" ++ }, ++ "orderBy": { ++ "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ "location": "query", ++ "type": "string" ++ }, ++ "pageToken": { ++ "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ "location": "query", ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "returnPartialSuccess": { ++ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ "location": "query", ++ "type": "boolean" ++ } ++ }, ++ "path": "projects/{project}/global/interconnectRemoteLocations", ++ "response": { ++ "$ref": "InterconnectRemoteLocationList" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute", ++ "https://www.googleapis.com/auth/compute.readonly" ++ ] ++ } ++ } ++ }, + "interconnects": { + "methods": { + "delete": { +@@ -15712,6 +15903,56 @@ + "https://www.googleapis.com/auth/compute" + ] + }, ++ "simulateMaintenanceEvent": { ++ "description": "Simulates maintenance event on specified nodes from the node group.", ++ "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ "httpMethod": "POST", ++ "id": "compute.nodeGroups.simulateMaintenanceEvent", ++ "parameterOrder": [ ++ "project", ++ "zone", ++ "nodeGroup" ++ ], ++ "parameters": { ++ "nodeGroup": { ++ "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ "required": true, ++ "type": "string" ++ }, ++ "project": { ++ "description": "Project ID for this request.", ++ "location": "path", ++ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ "required": true, ++ "type": "string" ++ }, ++ "requestId": { ++ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ "location": "query", ++ "type": "string" ++ }, ++ "zone": { ++ "description": "The name of the zone for this request.", ++ "location": "path", ++ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ "request": { ++ "$ref": "NodeGroupsSimulateMaintenanceEventRequest" ++ }, ++ "response": { ++ "$ref": "Operation" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform", ++ "https://www.googleapis.com/auth/compute" ++ ] ++ }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", +@@ -25956,6 +26197,11 @@ + "minimum": "0", + "type": "integer" + }, ++ "natName": { ++ "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", ++ "location": "query", ++ "type": "string" ++ }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", +@@ -33123,7 +33369,7 @@ + } + } + }, +- "revision": "20230307", ++ "revision": "20230516", + "rootUrl": "https://compute.googleapis.com/", + "schemas": { + "AcceleratorConfig": { +@@ -33543,11 +33789,11 @@ + "id": "AccessConfig", + "properties": { + "externalIpv6": { +- "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", ++ "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "type": "string" + }, + "externalIpv6PrefixLength": { +- "description": "The prefix length of the external IPv6 range.", ++ "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", + "format": "int32", + "type": "integer" + }, +@@ -33557,11 +33803,11 @@ + "type": "string" + }, + "name": { +- "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", ++ "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", + "type": "string" + }, + "natIP": { +- "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", ++ "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { +@@ -33589,8 +33835,7 @@ + "type": "boolean" + }, + "type": { +- "default": "ONE_TO_ONE_NAT", +- "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", ++ "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", + "enum": [ + "DIRECT_IPV6", + "ONE_TO_ONE_NAT" +@@ -33670,6 +33915,18 @@ + "description": "[Output Only] Type of the resource. 
Always compute#address for addresses.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Address.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "annotations": { + "required": [ +@@ -34357,6 +34614,18 @@ + ], + "type": "string" + }, ++ "savedState": { ++ "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", ++ "enum": [ ++ "DISK_SAVED_STATE_UNSPECIFIED", ++ "PRESERVED" ++ ], ++ "enumDescriptions": [ ++ "*[Default]* Disk state has not been preserved.", ++ "Disk state has been preserved." ++ ], ++ "type": "string" ++ }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" +@@ -34448,6 +34717,18 @@ + "format": "int64", + "type": "string" + }, ++ "provisionedThroughput": { ++ "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", ++ "format": "int64", ++ "type": "string" ++ }, ++ "replicaZones": { ++ "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" +@@ -35067,7 +35348,7 @@ + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { +- "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", ++ "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. 
The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", + "format": "int32", + "type": "integer" + }, +@@ -35097,7 +35378,7 @@ + "type": "integer" + }, + "mode": { +- "description": "Defines operating mode for this policy.", ++ "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", + "enum": [ + "OFF", + "ON", +@@ -35806,6 +36087,13 @@ + "$ref": "Duration", + "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." + }, ++ "metadatas": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -37095,7 +37383,7 @@ + "type": "string" + }, + "splitSourceCommitment": { +- "description": "Source commitment to be splitted into a new commitment.", ++ "description": "Source commitment to be split into a new commitment.", + "type": "string" + }, + "startTimestamp": { +@@ -37136,6 +37424,7 @@ + "GENERAL_PURPOSE_N2", + "GENERAL_PURPOSE_N2D", + "GENERAL_PURPOSE_T2D", ++ "GRAPHICS_OPTIMIZED", + "MEMORY_OPTIMIZED", + "MEMORY_OPTIMIZED_M3", + "TYPE_UNSPECIFIED" +@@ -37152,6 +37441,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -37802,6 +38092,17 @@ + ], + "type": "string" + }, ++ "asyncPrimaryDisk": { ++ "$ref": "DiskAsyncReplication", ++ "description": "Disk asynchronously replicated into this disk." ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskAsyncReplicationList" ++ }, ++ "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", ++ "type": "object" ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -37898,6 +38199,11 @@ + "format": "int64", + "type": "string" + }, ++ "provisionedThroughput": { ++ "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. 
Values must be between 1 and 7,124.", ++ "format": "int64", ++ "type": "string" ++ }, + "region": { + "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -37916,6 +38222,10 @@ + }, + "type": "array" + }, ++ "resourceStatus": { ++ "$ref": "DiskResourceStatus", ++ "description": "[Output Only] Status information for the disk resource." ++ }, + "satisfiesPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" +@@ -37929,6 +38239,14 @@ + "format": "int64", + "type": "string" + }, ++ "sourceConsistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, ++ "sourceConsistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", ++ "type": "string" ++ }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" +@@ -38128,6 +38446,37 @@ + }, + "type": "object" + }, ++ "DiskAsyncReplication": { ++ "id": "DiskAsyncReplication", ++ "properties": { ++ "consistencyGroupPolicy": { ++ "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "consistencyGroupPolicyId": { ++ "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", ++ "type": "string" ++ }, ++ "disk": { ++ "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", ++ "type": "string" ++ }, ++ "diskId": { ++ "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. 
For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskAsyncReplicationList": { ++ "id": "DiskAsyncReplicationList", ++ "properties": { ++ "asyncReplicationDisk": { ++ "$ref": "DiskAsyncReplication" ++ } ++ }, ++ "type": "object" ++ }, + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", +@@ -38317,6 +38666,47 @@ + }, + "type": "object" + }, ++ "DiskResourceStatus": { ++ "id": "DiskResourceStatus", ++ "properties": { ++ "asyncPrimaryDisk": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "asyncSecondaryDisks": { ++ "additionalProperties": { ++ "$ref": "DiskResourceStatusAsyncReplicationStatus" ++ }, ++ "description": "Key: disk, value: AsyncReplicationStatus message", ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "DiskResourceStatusAsyncReplicationStatus": { ++ "id": "DiskResourceStatusAsyncReplicationStatus", ++ "properties": { ++ "state": { ++ "enum": [ ++ "ACTIVE", ++ "CREATED", ++ "STARTING", ++ "STATE_UNSPECIFIED", ++ "STOPPED", ++ "STOPPING" ++ ], ++ "enumDescriptions": [ ++ "Replication is active.", ++ "Secondary disk is created and is waiting for replication to start.", ++ "Replication is starting.", ++ "", ++ "Replication is stopped.", ++ "Replication is stopping." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "DiskType": { + "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", + "id": "DiskType", +@@ -39982,6 +40372,20 @@ + "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "id": "FirewallPolicyRuleMatcher", + "properties": { ++ "destAddressGroups": { ++ "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destFqdns": { ++ "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "destIpRanges": { + "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", + "items": { +@@ -39989,6 +40393,20 @@ + }, + "type": "array" + }, ++ "destRegionCodes": { ++ "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. 
ex.\"US\" Maximum number of dest region codes allowed is 5000.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "destThreatIntelligences": { ++ "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "layer4Configs": { + "description": "Pairs of IP protocols and ports that the rule should match.", + "items": { +@@ -39996,6 +40414,20 @@ + }, + "type": "array" + }, ++ "srcAddressGroups": { ++ "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "srcFqdns": { ++ "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "srcIpRanges": { + "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", + "items": { +@@ -40003,12 +40435,26 @@ + }, + "type": "array" + }, ++ "srcRegionCodes": { ++ "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of source region codes allowed is 5000.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, + "srcSecureTags": { + "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", + "items": { + "$ref": "FirewallPolicyRuleSecureTag" + }, + "type": "array" ++ }, ++ "srcThreatIntelligences": { ++ "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" + } + }, + "type": "object" +@@ -40113,6 +40559,10 @@ + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "type": "boolean" + }, ++ "allowPscGlobalAccess": { ++ "description": "This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.", ++ "type": "boolean" ++ }, + "backendService": { + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "type": "string" +@@ -40207,7 +40657,7 @@ + "type": "string" + }, + "network": { +- "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", ++ "description": "This field is not used for external load balancing. 
For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "type": "string" + }, + "networkTier": { +@@ -40717,6 +41167,20 @@ + }, + "type": "object" + }, ++ "GlobalAddressesMoveRequest": { ++ "id": "GlobalAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { +@@ -40874,13 +41338,14 @@ + "id": "GuestOsFeature", + "properties": { + "type": { +- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", ++ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "SEV_CAPABLE", ++ "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", +@@ -40895,6 +41360,7 @@ + "", + "", + "", ++ "", + "" + ], + "type": "string" +@@ -41062,7 +41528,7 @@ + "type": "object" + }, + "HealthCheck": { +- "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). 
For more information, see Health checks overview.", ++ "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { +@@ -41757,7 +42223,7 @@ + "type": "string" + }, + "healthState": { +- "description": "Health state of the instance.", ++ "description": "Health state of the IPv4 address of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" +@@ -41830,10 +42296,10 @@ + "UNKNOWN" + ], + "enumDescriptions": [ +- "", +- "", +- "", +- "" ++ "Endpoint is being drained.", ++ "Endpoint is healthy.", ++ "Endpoint is unhealthy.", ++ "Health status of the endpoint is unknown." + ], + "type": "string" + } +@@ -42416,6 +42882,10 @@ + }, + "type": "array" + }, ++ "pathTemplateMatch": { ++ "description": "If specified, the route is a pattern match expression that must match the :path header once the query string is removed. A pattern match allows you to match - The value must be between 1 and 1024 characters - The pattern must start with a leading slash (\"/\") - There may be no more than 5 operators in pattern Precisely one of prefix_match, full_path_match, regex_match or path_template_match must be set.", ++ "type": "string" ++ }, + "prefixMatch": { + "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" +@@ -43218,7 +43688,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -44080,7 +44550,7 @@ + "type": "string" + }, + "initialDelaySec": { +- "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", ++ "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. 
During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", + "format": "int32", + "type": "integer" + } +@@ -44444,7 +44914,7 @@ + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { +- "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", ++ "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", + "items": { + "type": "string" + }, +@@ -45986,7 +46456,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -46303,7 +46773,7 @@ + "type": "object" + }, + "Interconnect": { +- "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", ++ "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "id": "Interconnect", + "properties": { + "adminEnabled": { +@@ -46375,6 +46845,18 @@ + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "linkType": { + "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. 
Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", + "enum": [ +@@ -46426,6 +46908,10 @@ + "format": "int32", + "type": "integer" + }, ++ "remoteLocation": { ++ "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", ++ "type": "string" ++ }, + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32", +@@ -46520,6 +47006,10 @@ + "description": "This field is not available.", + "type": "string" + }, ++ "configurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Constraints for this attachment, if any. The attachment does not work if these constraints are not met." ++ }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" +@@ -46585,7 +47075,7 @@ + "type": "string" + }, + "ipsecInternalAddresses": { +- "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", ++ "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", + "items": { + "type": "string" + }, +@@ -46596,6 +47086,18 @@ + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "mtu": { + "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", + "format": "int32", +@@ -46639,6 +47141,10 @@ + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, ++ "remoteService": { ++ "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: \"Amazon Web Services\" \"Microsoft Azure\". The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", ++ "type": "string" ++ }, + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "type": "string" +@@ -46685,6 +47191,11 @@ + ], + "type": "string" + }, ++ "subnetLength": { ++ "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", ++ "format": "int32", ++ "type": "integer" ++ }, + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", + "enum": [ +@@ -46834,6 +47345,47 @@ + }, + "type": "object" + }, ++ "InterconnectAttachmentConfigurationConstraints": { ++ "id": "InterconnectAttachmentConfigurationConstraints", ++ "properties": { ++ "bgpMd5": { ++ "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. 
Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", ++ "enum": [ ++ "MD5_OPTIONAL", ++ "MD5_REQUIRED", ++ "MD5_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", ++ "MD5_REQUIRED: BGP MD5 authentication must be configured.", ++ "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" ++ ], ++ "type": "string" ++ }, ++ "bgpPeerAsnRanges": { ++ "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. Although the API accepts other ranges, these are the ranges that we recommend.", ++ "items": { ++ "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { ++ "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", ++ "properties": { ++ "max": { ++ "format": "uint32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "uint32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", +@@ -47727,6 +48279,308 @@ + }, + "type": "object" + }, ++ "InterconnectRemoteLocation": { ++ "description": "Represents a Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", ++ "id": "InterconnectRemoteLocation", ++ "properties": { ++ "address": { ++ "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", ++ "type": "string" ++ }, ++ "attachmentConfigurationConstraints": { ++ "$ref": "InterconnectAttachmentConfigurationConstraints", ++ "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." ++ }, ++ "city": { ++ "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", ++ "type": "string" ++ }, ++ "constraints": { ++ "$ref": "InterconnectRemoteLocationConstraints", ++ "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments." 
++ }, ++ "continent": { ++ "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", ++ "enum": [ ++ "AFRICA", ++ "ASIA_PAC", ++ "EUROPE", ++ "NORTH_AMERICA", ++ "SOUTH_AMERICA" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ }, ++ "creationTimestamp": { ++ "description": "[Output Only] Creation timestamp in RFC3339 text format.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "[Output Only] An optional description of the resource.", ++ "type": "string" ++ }, ++ "facilityProvider": { ++ "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", ++ "type": "string" ++ }, ++ "facilityProviderFacilityId": { ++ "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", ++ "type": "string" ++ }, ++ "id": { ++ "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", ++ "format": "uint64", ++ "type": "string" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocation", ++ "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", ++ "type": "string" ++ }, ++ "lacp": { ++ "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", ++ "enum": [ ++ "LACP_SUPPORTED", ++ "LACP_UNSUPPORTED" ++ ], ++ "enumDescriptions": [ ++ "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", ++ "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." ++ ], ++ "type": "string" ++ }, ++ "maxLagSize100Gbps": { ++ "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "maxLagSize10Gbps": { ++ "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", ++ "format": "int32", ++ "type": "integer" ++ }, ++ "name": { ++ "description": "[Output Only] Name of the resource.", ++ "type": "string" ++ }, ++ "peeringdbFacilityId": { ++ "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", ++ "type": "string" ++ }, ++ "permittedConnections": { ++ "description": "[Output Only] Permitted connections.", ++ "items": { ++ "$ref": "InterconnectRemoteLocationPermittedConnections" ++ }, ++ "type": "array" ++ }, ++ "remoteService": { ++ "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for the resource.", ++ "type": "string" ++ }, ++ "status": { ++ "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects. 
- AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", ++ "enum": [ ++ "AVAILABLE", ++ "CLOSED" ++ ], ++ "enumDescriptions": [ ++ "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", ++ "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraints": { ++ "id": "InterconnectRemoteLocationConstraints", ++ "properties": { ++ "portPairRemoteLocation": { ++ "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", ++ "enum": [ ++ "PORT_PAIR_MATCHING_REMOTE_LOCATION", ++ "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", ++ "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." ++ ], ++ "type": "string" ++ }, ++ "portPairVlan": { ++ "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", ++ "enum": [ ++ "PORT_PAIR_MATCHING_VLAN", ++ "PORT_PAIR_UNCONSTRAINED_VLAN" ++ ], ++ "enumDescriptions": [ ++ "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", ++ "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." ++ ], ++ "type": "string" ++ }, ++ "subnetLengthRange": { ++ "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. 
" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationConstraintsSubnetLengthRange": { ++ "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", ++ "properties": { ++ "max": { ++ "format": "int32", ++ "type": "integer" ++ }, ++ "min": { ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationList": { ++ "description": "Response to the list request, and contains a list of interconnect remote locations.", ++ "id": "InterconnectRemoteLocationList", ++ "properties": { ++ "id": { ++ "description": "[Output Only] Unique identifier for the resource; defined by the server.", ++ "type": "string" ++ }, ++ "items": { ++ "description": "A list of InterconnectRemoteLocation resources.", ++ "items": { ++ "$ref": "InterconnectRemoteLocation" ++ }, ++ "type": "array" ++ }, ++ "kind": { ++ "default": "compute#interconnectRemoteLocationList", ++ "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", ++ "type": "string" ++ }, ++ "nextPageToken": { ++ "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", ++ "type": "string" ++ }, ++ "selfLink": { ++ "description": "[Output Only] Server-defined URL for this resource.", ++ "type": "string" ++ }, ++ "warning": { ++ "description": "[Output Only] Informational warning message.", ++ "properties": { ++ "code": { ++ "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", ++ "enum": [ ++ "CLEANUP_FAILED", ++ "DEPRECATED_RESOURCE_USED", ++ "DEPRECATED_TYPE_USED", ++ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", ++ "EXPERIMENTAL_TYPE_USED", ++ "EXTERNAL_API_WARNING", ++ "FIELD_VALUE_OVERRIDEN", ++ "INJECTED_KERNELS_DEPRECATED", ++ "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", ++ "LARGE_DEPLOYMENT_WARNING", ++ "MISSING_TYPE_DEPENDENCY", ++ "NEXT_HOP_ADDRESS_NOT_ASSIGNED", ++ "NEXT_HOP_CANNOT_IP_FORWARD", ++ "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", ++ "NEXT_HOP_INSTANCE_NOT_FOUND", ++ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", ++ "NEXT_HOP_NOT_RUNNING", ++ "NOT_CRITICAL_ERROR", ++ "NO_RESULTS_ON_PAGE", ++ "PARTIAL_SUCCESS", ++ "REQUIRED_TOS_AGREEMENT", ++ "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", ++ "RESOURCE_NOT_DELETED", ++ "SCHEMA_VALIDATION_IGNORED", ++ "SINGLE_INSTANCE_PROPERTY_TEMPLATE", ++ "UNDECLARED_PROPERTIES", ++ "UNREACHABLE" ++ ], ++ "enumDescriptions": [ ++ "Warning about failed cleanup of transient changes made by a failed operation.", ++ "A link to a deprecated resource was created.", ++ "When deploying and at least one of the resources has a type marked as deprecated", ++ "The user created a boot disk that is larger than image size.", ++ "When deploying and at least one of the resources has a type marked as experimental", ++ "Warning that is present in an external api call", ++ "Warning that value of a field has been overridden. 
Deprecated unused field.", ++ "The operation involved use of an injected kernel, which is deprecated.", ++ "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", ++ "When deploying a deployment with a exceedingly large number of resources", ++ "A resource depends on a missing type", ++ "The route's nextHopIp address is not assigned to an instance on the network.", ++ "The route's next hop instance cannot ip forward.", ++ "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", ++ "The route's nextHopInstance URL refers to an instance that does not exist.", ++ "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", ++ "The route's next hop instance does not have a status of RUNNING.", ++ "Error which is not critical. We decided to continue the process despite the mentioned error.", ++ "No results are present on a particular list page.", ++ "Success is reported, but some results may be missing due to errors", ++ "The user attempted to use a resource that requires a TOS they have not accepted.", ++ "Warning that a resource is in use.", ++ "One or more of the resources set to auto-delete could not be deleted because they were in use.", ++ "When a resource schema validation is ignored.", ++ "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", ++ "When undeclared properties in the schema are present", ++ "A given scope cannot be reached." ++ ], ++ "type": "string" ++ }, ++ "data": { ++ "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", ++ "items": { ++ "properties": { ++ "key": { ++ "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", ++ "type": "string" ++ }, ++ "value": { ++ "description": "[Output Only] A warning data value corresponding to the key.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, ++ "type": "array" ++ }, ++ "message": { ++ "description": "[Output Only] A human-readable description of the warning code.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ } ++ }, ++ "type": "object" ++ }, ++ "InterconnectRemoteLocationPermittedConnections": { ++ "id": "InterconnectRemoteLocationPermittedConnections", ++ "properties": { ++ "interconnectLocation": { ++ "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "InterconnectsGetDiagnosticsResponse": { + "description": "Response for the InterconnectsGetDiagnosticsRequest.", + "id": "InterconnectsGetDiagnosticsResponse", +@@ -48464,7 +49318,7 @@ + "type": "integer" + }, + "guestAcceleratorType": { +- "description": "The accelerator type resource name, not a full URL, e.g. 
'nvidia-tesla-k80'.", ++ "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", + "type": "string" + } + }, +@@ -48971,7 +49825,7 @@ + "TERMINATED" + ], + "enumDescriptions": [ +- "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", ++ "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "Resources are being allocated for the instance.", + "The instance is in repair.", + "The instance is running.", +@@ -49236,7 +50090,7 @@ + "type": "string" + }, + "gatewayIPv4": { +- "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", ++ "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, +@@ -49343,7 +50197,7 @@ + "type": "string" + }, + "fingerprint": { +- "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", ++ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, +@@ -49368,7 +50222,7 @@ + "type": "string" + }, + "network": { +- "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", ++ "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", + "type": "string" + }, + "producerAcceptLists": { +@@ -49532,7 +50386,7 @@ + "id": "NetworkAttachmentConnectedEndpoint", + "properties": { + "ipAddress": { +- "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", ++ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { +@@ -49540,7 +50394,7 @@ + "type": "string" + }, + "secondaryIpCidrRanges": { +- "description": "Alias IP ranges from the same subnetwork", ++ "description": "Alias IP ranges from the same subnetwork.", + "items": { + "type": "string" + }, +@@ -50919,7 +51773,7 @@ + "type": "integer" + }, + "stackType": { +- "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", ++ "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. 
This field can be both set at instance creation and update network interface operations.", + "enum": [ + "IPV4_IPV6", + "IPV4_ONLY" +@@ -52008,6 +52862,19 @@ + }, + "type": "object" + }, ++ "NodeGroupsSimulateMaintenanceEventRequest": { ++ "id": "NodeGroupsSimulateMaintenanceEventRequest", ++ "properties": { ++ "nodes": { ++ "description": "Names of the nodes to go under maintenance simulation.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "NodeTemplate": { + "description": "Represent a sole-tenant Node Template resource. You can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances.", + "id": "NodeTemplate", +@@ -55474,6 +56341,7 @@ + "COMMITTED_NVIDIA_A100_80GB_GPUS", + "COMMITTED_NVIDIA_A100_GPUS", + "COMMITTED_NVIDIA_K80_GPUS", ++ "COMMITTED_NVIDIA_L4_GPUS", + "COMMITTED_NVIDIA_P100_GPUS", + "COMMITTED_NVIDIA_P4_GPUS", + "COMMITTED_NVIDIA_T4_GPUS", +@@ -55525,11 +56393,15 @@ + "NETWORK_ATTACHMENTS", + "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", ++ "NET_LB_SECURITY_POLICIES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULES_PER_REGION", ++ "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", + "NODE_GROUPS", + "NODE_TEMPLATES", + "NVIDIA_A100_80GB_GPUS", + "NVIDIA_A100_GPUS", + "NVIDIA_K80_GPUS", ++ "NVIDIA_L4_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", +@@ -55544,6 +56416,7 @@ + "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", + "PREEMPTIBLE_NVIDIA_A100_GPUS", + "PREEMPTIBLE_NVIDIA_K80_GPUS", ++ "PREEMPTIBLE_NVIDIA_L4_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", +@@ -55567,6 +56440,7 @@ + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICIES_PER_REGION", ++ "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", + "SECURITY_POLICY_CEVAL_RULES", + "SECURITY_POLICY_RULES", + "SECURITY_POLICY_RULES_PER_REGION", +@@ -55624,6 +56498,7 @@ + "", + "", + "", ++ "", + "Guest CPUs", + "", + "", +@@ -55715,6 +56590,12 @@ + "", + "", + "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", + "The total number of snapshots allowed for a single project.", + "", + "", +@@ -55868,6 +56749,20 @@ + }, + "type": "object" + }, ++ "RegionAddressesMoveRequest": { ++ "id": "RegionAddressesMoveRequest", ++ "properties": { ++ "description": { ++ "description": "An optional destination address description if intended to be different from the source.", ++ "type": "string" ++ }, ++ "destinationAddress": { ++ "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. So /regions/region/addresses/address is not valid partial url.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "RegionAutoscalerList": { + "description": "Contains a list of autoscalers.", + "id": "RegionAutoscalerList", +@@ -59465,7 +60360,7 @@ + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { +- "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. 
- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", ++ "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", +@@ -60675,6 +61570,18 @@ + "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +@@ -60727,15 +61634,15 @@ + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { +- "description": "Configuration options for L7 DDoS detection.", ++ "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", + "properties": { + "enable": { +- "description": "If set to true, enables CAAP for L7 DDoS detection.", ++ "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "boolean" + }, + "ruleVisibility": { +- "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", ++ "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "enum": [ + "PREMIUM", + "STANDARD" +@@ -60930,7 +61837,7 @@ + "id": "SecurityPolicyRecaptchaOptionsConfig", + "properties": { + "redirectSiteKey": { +- "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", ++ "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + } + }, +@@ -60950,7 +61857,7 @@ + "id": "SecurityPolicyRule", + "properties": { + "action": { +- "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", ++ "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "type": "string" + }, + "description": { +@@ -60959,7 +61866,7 @@ + }, + "headerAction": { + "$ref": "SecurityPolicyRuleHttpHeaderAction", +- "description": "Optional, additional actions that are performed on headers." ++ "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." 
+ }, + "kind": { + "default": "compute#securityPolicyRule", +@@ -60989,7 +61896,7 @@ + }, + "redirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action. Cannot be specified for any other actions." ++ "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." + } + }, + "type": "object" +@@ -61031,7 +61938,7 @@ + }, + "expr": { + "$ref": "Expr", +- "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." ++ "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", +@@ -61185,17 +62092,24 @@ + ], + "type": "string" + }, ++ "enforceOnKeyConfigs": { ++ "description": "If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must not be specified.", ++ "items": { ++ "$ref": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig" ++ }, ++ "type": "array" ++ }, + "enforceOnKeyName": { + "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", + "type": "string" + }, + "exceedAction": { +- "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", ++ "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", + "type": "string" + }, + "exceedRedirectOptions": { + "$ref": "SecurityPolicyRuleRedirectOptions", +- "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." ++ "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR." + }, + "rateLimitThreshold": { + "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", +@@ -61204,6 +62118,40 @@ + }, + "type": "object" + }, ++ "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig": { ++ "id": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig", ++ "properties": { ++ "enforceOnKeyName": { ++ "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", ++ "type": "string" ++ }, ++ "enforceOnKeyType": { ++ "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", ++ "enum": [ ++ "ALL", ++ "HTTP_COOKIE", ++ "HTTP_HEADER", ++ "HTTP_PATH", ++ "IP", ++ "REGION_CODE", ++ "SNI", ++ "XFF_IP" ++ ], ++ "enumDescriptions": [ ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "", ++ "" ++ ], ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "SecurityPolicyRuleRateLimitOptionsThreshold": { + "id": "SecurityPolicyRuleRateLimitOptionsThreshold", + "properties": { +@@ -61247,11 +62195,11 @@ + "id": "SecuritySettings", + "properties": { + "clientTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", + "type": "string" + }, + "subjectAltNames": { +- "description": "Optional. 
A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", ++ "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", + "items": { + "type": "string" + }, +@@ -61328,7 +62276,7 @@ + "type": "object" + }, + "ServiceAttachment": { +- "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", ++ "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", + "id": "ServiceAttachment", + "properties": { + "connectedEndpoints": { +@@ -61425,6 +62373,10 @@ + "$ref": "Uint128", + "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." + }, ++ "reconcileConnections": { ++ "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", ++ "type": "boolean" ++ }, + "region": { + "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. 
You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" +@@ -62059,6 +63011,7 @@ + "name": { + "annotations": { + "required": [ ++ "compute.disks.createSnapshot", + "compute.snapshots.insert" + ] + }, +@@ -63499,7 +64452,7 @@ + "type": "string" + }, + "enableFlowLogs": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "externalIpv6Prefix": { +@@ -63581,7 +64534,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -63603,7 +64556,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. 
This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -63924,7 +64877,7 @@ + "type": "string" + }, + "enable": { +- "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", ++ "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "type": "boolean" + }, + "filterExpr": { +@@ -64775,7 +65728,7 @@ + "id": "TargetHttpsProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -64823,7 +65776,7 @@ + "type": "string" + }, + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -64881,7 +65834,7 @@ + "type": "string" + }, + "serverTlsPolicy": { +- "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", ++ "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", + "type": "string" + }, + "sslCertificates": { +@@ -66080,7 +67033,7 @@ + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { +- "description": "URL of the Certificate Map to associate with this TargetSslProxy.", ++ "description": "URL of the Certificate Map to associate with this TargetSslProxy. 
Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + } + }, +@@ -66122,7 +67075,7 @@ + "id": "TargetSslProxy", + "properties": { + "certificateMap": { +- "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", ++ "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", + "type": "string" + }, + "creationTimestamp": { +@@ -66766,6 +67719,18 @@ + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "name": { + "annotations": { + "required": [ +@@ -67821,6 +68786,10 @@ + "pathPrefixRewrite": { + "description": "Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters.", + "type": "string" ++ }, ++ "pathTemplateRewrite": { ++ "description": " If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. A corresponding path_template_match must be specified. Any template variables must exist in the path_template_match field. - -At least one variable must be specified in the path_template_match field - You can omit variables from the rewritten URL - The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. For example, a path_template_match of /static/{format=**} could be rewritten as /static/content/{format} to prefix /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as /content/{format}/{country}/{suffix}. At least one non-empty routeRules[].matchRules[].path_template_match is required. Only one of path_prefix_rewrite or path_template_rewrite may be specified.", ++ "type": "string" + } + }, + "type": "object" +@@ -67858,7 +68827,7 @@ + "type": "string" + }, + "purpose": { +- "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. 
A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", ++ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", + "enum": [ + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", +@@ -67876,7 +68845,7 @@ + "type": "string" + }, + "role": { +- "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", ++ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" +@@ -68663,7 +69632,7 @@ + "type": "integer" + }, + "peerGatewayInterface": { +- "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", ++ "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "format": "uint32", + "type": "integer" + }, +@@ -68675,7 +69644,7 @@ + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { +- "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", ++ "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { +@@ -68862,6 +69831,18 @@ + "description": "[Output Only] Type of resource. 
Always compute#vpnTunnel for VPN tunnels.", + "type": "string" + }, ++ "labelFingerprint": { ++ "description": "A fingerprint for the labels being applied to this VpnTunnel, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnTunnel.", ++ "format": "byte", ++ "type": "string" ++ }, ++ "labels": { ++ "additionalProperties": { ++ "type": "string" ++ }, ++ "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", ++ "type": "object" ++ }, + "localTrafficSelector": { + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", + "items": { +diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go +index c30ae0d4e1d..47ec0a57f3a 100644 +--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go ++++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go +@@ -75,6 +75,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "compute:v1" + const apiName = "compute" +@@ -172,6 +173,7 @@ func New(client *http.Client) (*Service, error) { + s.Instances = NewInstancesService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) ++ s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) + s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) + s.Licenses = NewLicensesService(s) +@@ -299,6 +301,8 @@ type Service struct { + + InterconnectLocations *InterconnectLocationsService + ++ InterconnectRemoteLocations *InterconnectRemoteLocationsService ++ + Interconnects *InterconnectsService + + LicenseCodes *LicenseCodesService +@@ -685,6 +689,15 @@ type InterconnectLocationsService struct { + s *Service + } + ++func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { ++ rs := &InterconnectRemoteLocationsService{s: s} ++ return rs ++} ++ ++type InterconnectRemoteLocationsService struct { ++ s *Service ++} ++ + func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +@@ -1917,32 +1930,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + // AccessConfig: An access configuration attached to an instance's + // network interface. Only one access config per instance is supported. + type AccessConfig struct { +- // ExternalIpv6: The first IPv6 address of the external IPv6 range +- // associated with this instance, prefix length is stored in +- // externalIpv6PrefixLength in ipv6AccessConfig. To use a static +- // external IP address, it must be unused and in the same region as the +- // instance's zone. If not specified, Google Cloud will automatically +- // assign an external IPv6 address from the instance's subnetwork. 
++ // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 ++ // address of the external IPv6 range associated with this instance, ++ // prefix length is stored in externalIpv6PrefixLength in ++ // ipv6AccessConfig. To use a static external IP address, it must be ++ // unused and in the same region as the instance's zone. If not ++ // specified, Google Cloud will automatically assign an external IPv6 ++ // address from the instance's subnetwork. + ExternalIpv6 string `json:"externalIpv6,omitempty"` + +- // ExternalIpv6PrefixLength: The prefix length of the external IPv6 +- // range. ++ // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The ++ // prefix length of the external IPv6 range. + ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + +- // Name: The name of this access configuration. The default and +- // recommended name is External NAT, but you can use any arbitrary +- // string, such as My external IP or Network Access. ++ // Name: The name of this access configuration. In accessConfigs (IPv4), ++ // the default and recommended name is External NAT, but you can use any ++ // arbitrary string, such as My external IP or Network Access. In ++ // ipv6AccessConfigs, the recommend name is External IPv6. + Name string `json:"name,omitempty"` + +- // NatIP: An external IP address associated with this instance. Specify +- // an unused static external IP address available to the project or +- // leave this field undefined to use an IP from a shared ephemeral IP +- // address pool. If you specify a static external IP address, it must +- // live in the same region as the zone of the instance. ++ // NatIP: Applies to accessConfigs (IPv4) only. An external IP address ++ // associated with this instance. Specify an unused static external IP ++ // address available to the project or leave this field undefined to use ++ // an IP from a shared ephemeral IP address pool. If you specify a ++ // static external IP address, it must live in the same region as the ++ // zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -1978,12 +1994,13 @@ type AccessConfig struct { + // associated. + SetPublicPtr bool `json:"setPublicPtr,omitempty"` + +- // Type: The type of configuration. The default and only option is +- // ONE_TO_ONE_NAT. ++ // Type: The type of configuration. In accessConfigs (IPv4), the default ++ // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default ++ // and only option is DIRECT_IPV6. + // + // Possible values: + // "DIRECT_IPV6" +- // "ONE_TO_ONE_NAT" (default) ++ // "ONE_TO_ONE_NAT" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to +@@ -2065,6 +2082,21 @@ type Address struct { + // addresses. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // Address, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. 
To see the latest fingerprint, make a get() ++ // request to retrieve an Address. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -3140,6 +3172,17 @@ type AttachedDisk struct { + // read-write mode. + Mode string `json:"mode,omitempty"` + ++ // SavedState: For LocalSSD disks on VM Instances in STOPPED or ++ // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data ++ // has been saved to a persistent location by customer request. (see the ++ // discard_local_ssd option on Stop/Suspend). Read-only in the api. ++ // ++ // Possible values: ++ // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not ++ // been preserved. ++ // "PRESERVED" - Disk state has been preserved. ++ SavedState string `json:"savedState,omitempty"` ++ + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` +@@ -3263,6 +3306,18 @@ type AttachedDiskInitializeParams struct { + // see the Extreme persistent disk documentation. + ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + ++ // ProvisionedThroughput: Indicates how much throughput to provision for ++ // the disk. This sets the number of throughput mb per second that the ++ // disk can handle. Values must be between 1 and 7,124. ++ ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` ++ ++ // ReplicaZones: Required for each regional disk associated with the ++ // instance. Specify the URLs of the zones where the disk should be ++ // replicated to. You must provide exactly two replica zones, and one ++ // zone must be the same as the instance zone. You can't use this option ++ // with boot disks. ++ ReplicaZones []string `json:"replicaZones,omitempty"` ++ + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values +@@ -4262,15 +4317,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + + // AutoscalingPolicy: Cloud Autoscaler policy. + type AutoscalingPolicy struct { +- // CoolDownPeriodSec: The number of seconds that the autoscaler waits +- // before it starts collecting information from a new instance. This +- // prevents the autoscaler from collecting information when the instance +- // is initializing, during which the collected usage would not be +- // reliable. The default time autoscaler waits is 60 seconds. Virtual +- // machine initialization times might vary because of numerous factors. +- // We recommend that you test how long an instance may take to +- // initialize. To do this, create an instance and time the startup +- // process. ++ // CoolDownPeriodSec: The number of seconds that your application takes ++ // to initialize on a VM instance. This is referred to as the ++ // initialization period (/compute/docs/autoscaler#cool_down_period). 
++ // Specifying an accurate initialization period improves autoscaler ++ // decisions. For example, when scaling out, the autoscaler ignores data ++ // from VMs that are still initializing because those VMs might not yet ++ // represent normal usage of your application. The default ++ // initialization period is 60 seconds. Initialization periods might ++ // vary because of numerous factors. We recommend that you test how long ++ // your application takes to initialize. To do this, create a VM and ++ // time your application's startup process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the +@@ -4298,7 +4355,12 @@ type AutoscalingPolicy struct { + // instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + +- // Mode: Defines operating mode for this policy. ++ // Mode: Defines the operating mode for this policy. The following modes ++ // are available: - OFF: Disables the autoscaler but maintains its ++ // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM ++ // instances only. - ON: Enables all autoscaler activities according to ++ // its policy. For more information, see "Turning off or restricting an ++ // autoscaler" + // + // Possible values: + // "OFF" - Do not automatically scale the MIG in or out. The +@@ -5591,6 +5653,10 @@ type BackendService struct { + // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. + MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + ++ // Metadatas: Deployment metadata associated with the resource to be set ++ // by a GKE hub controller and read by the backend RCTH ++ Metadatas map[string]string `json:"metadatas,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -7685,7 +7751,7 @@ type Commitment struct { + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + +- // SplitSourceCommitment: Source commitment to be splitted into a new ++ // SplitSourceCommitment: Source commitment to be split into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + +@@ -7726,6 +7792,7 @@ type Commitment struct { + // "GENERAL_PURPOSE_N2" + // "GENERAL_PURPOSE_N2D" + // "GENERAL_PURPOSE_T2D" ++ // "GRAPHICS_OPTIMIZED" + // "MEMORY_OPTIMIZED" + // "MEMORY_OPTIMIZED_M3" + // "TYPE_UNSPECIFIED" +@@ -8773,6 +8840,13 @@ type Disk struct { + // "X86_64" - Machines with architecture X86_64 + Architecture string `json:"architecture,omitempty"` + ++ // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. ++ AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: [Output Only] A list of disks this disk is ++ // asynchronously replicated to. ++ AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -8877,6 +8951,11 @@ type Disk struct { + // see the Extreme persistent disk documentation. + ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + ++ // ProvisionedThroughput: Indicates how much throughput to provision for ++ // the disk. 
This sets the number of throughput mb per second that the ++ // disk can handle. Values must be between 1 and 7,124. ++ ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` ++ + // Region: [Output Only] URL of the region where the disk resides. Only + // applicable for regional resources. You must specify this field as + // part of the HTTP request URL. It is not settable as a field in the +@@ -8891,6 +8970,10 @@ type Disk struct { + // automatic snapshot creations. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + ++ // ResourceStatus: [Output Only] Status information for the disk ++ // resource. ++ ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` ++ + // SatisfiesPzs: [Output Only] Reserved for future use. + SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` + +@@ -8906,6 +8989,16 @@ type Disk struct { + // source. Acceptable values are 1 to 65536, inclusive. + SizeGb int64 `json:"sizeGb,omitempty,string"` + ++ // SourceConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` ++ ++ // SourceConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy for a secondary disk that was created ++ // using a consistency group. ++ SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` ++ + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: - +@@ -9230,6 +9323,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskAsyncReplication struct { ++ // ConsistencyGroupPolicy: [Output Only] URL of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` ++ ++ // ConsistencyGroupPolicyId: [Output Only] ID of the ++ // DiskConsistencyGroupPolicy if replication was started on the disk as ++ // a member of a group. ++ ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` ++ ++ // Disk: The other disk asynchronously replicated to or from the current ++ // disk. You can provide this as a partial or full URL to the resource. ++ // For example, the following are valid values: - ++ // https://www.googleapis.com/compute/v1/projects/project/zones/zone ++ // /disks/disk - projects/project/zones/zone/disks/disk - ++ // zones/zone/disks/disk ++ Disk string `json:"disk,omitempty"` ++ ++ // DiskId: [Output Only] The unique ID of the other disk asynchronously ++ // replicated to or from the current disk. This value identifies the ++ // exact disk that was used to create this replication. For example, if ++ // you started replicating the persistent disk from a disk that was ++ // later deleted and recreated under the same name, the disk ID would ++ // identify the exact version of the disk that was used. ++ DiskId string `json:"diskId,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "ConsistencyGroupPolicy") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. 
However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplication ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskAsyncReplicationList struct { ++ AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AsyncReplicationDisk") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskAsyncReplicationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskInstantiationConfig: A specification of the desired way to + // instantiate a disk in the instance template when its created from a + // source instance. +@@ -9571,6 +9744,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type DiskResourceStatus struct { ++ AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` ++ ++ // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message ++ AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "AsyncPrimaryDisk") to ++ // include in API requests with the JSON null value. 
By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type DiskResourceStatusAsyncReplicationStatus struct { ++ // Possible values: ++ // "ACTIVE" - Replication is active. ++ // "CREATED" - Secondary disk is created and is waiting for ++ // replication to start. ++ // "STARTING" - Replication is starting. ++ // "STATE_UNSPECIFIED" ++ // "STOPPED" - Replication is stopped. ++ // "STOPPING" - Replication is stopping. ++ State string `json:"state,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "State") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "State") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { ++ type NoMethod DiskResourceStatusAsyncReplicationStatus ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // DiskType: Represents a Disk Type resource. Google Compute Engine has + // two Disk Type resources: * Regional + // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal +@@ -12303,38 +12540,77 @@ func (s *FirewallPolicyRule) MarshalJSON() ([]byte, error) { + // FirewallPolicyRuleMatcher: Represents a match condition that incoming + // traffic is evaluated against. Exactly one field must be specified. + type FirewallPolicyRuleMatcher struct { ++ // DestAddressGroups: Address groups which should be matched against the ++ // traffic destination. Maximum number of destination address groups is ++ // 10. ++ DestAddressGroups []string `json:"destAddressGroups,omitempty"` ++ ++ // DestFqdns: Fully Qualified Domain Name (FQDN) which should be matched ++ // against traffic destination. Maximum number of destination fqdn ++ // allowed is 100. ++ DestFqdns []string `json:"destFqdns,omitempty"` ++ + // DestIpRanges: CIDR IP address range. Maximum number of destination + // CIDR IP ranges allowed is 5000. + DestIpRanges []string `json:"destIpRanges,omitempty"` + ++ // DestRegionCodes: Region codes whose IP addresses will be used to ++ // match for destination of traffic. Should be specified as 2 letter ++ // country code defined as per ISO 3166 alpha-2 country codes. ex."US" ++ // Maximum number of dest region codes allowed is 5000. 
++ DestRegionCodes []string `json:"destRegionCodes,omitempty"` ++ ++ // DestThreatIntelligences: Names of Network Threat Intelligence lists. ++ // The IPs in these lists will be matched against traffic destination. ++ DestThreatIntelligences []string `json:"destThreatIntelligences,omitempty"` ++ + // Layer4Configs: Pairs of IP protocols and ports that the rule should + // match. + Layer4Configs []*FirewallPolicyRuleMatcherLayer4Config `json:"layer4Configs,omitempty"` + ++ // SrcAddressGroups: Address groups which should be matched against the ++ // traffic source. Maximum number of source address groups is 10. ++ SrcAddressGroups []string `json:"srcAddressGroups,omitempty"` ++ ++ // SrcFqdns: Fully Qualified Domain Name (FQDN) which should be matched ++ // against traffic source. Maximum number of source fqdn allowed is 100. ++ SrcFqdns []string `json:"srcFqdns,omitempty"` ++ + // SrcIpRanges: CIDR IP address range. Maximum number of source CIDR IP + // ranges allowed is 5000. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + ++ // SrcRegionCodes: Region codes whose IP addresses will be used to match ++ // for source of traffic. Should be specified as 2 letter country code ++ // defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number ++ // of source region codes allowed is 5000. ++ SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` ++ + // SrcSecureTags: List of secure tag values, which should be matched at + // the source of the traffic. For INGRESS rule, if all the srcSecureTag + // are INEFFECTIVE, and there is no srcIpRange, this rule will be + // ignored. Maximum number of source tag values allowed is 256. + SrcSecureTags []*FirewallPolicyRuleSecureTag `json:"srcSecureTags,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DestIpRanges") to +- // unconditionally include in API requests. By default, fields with ++ // SrcThreatIntelligences: Names of Network Threat Intelligence lists. ++ // The IPs in these lists will be matched against traffic source. ++ SrcThreatIntelligences []string `json:"srcThreatIntelligences,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "DestAddressGroups") ++ // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DestIpRanges") to include +- // in API requests with the JSON null value. By default, fields with +- // empty values are omitted from API requests. However, any field with +- // an empty value appearing in NullFields will be sent to the server as +- // null. It is an error if a field in this list has a non-empty value. +- // This may be used to include null fields in Patch requests. ++ // NullFields is a list of field names (e.g. "DestAddressGroups") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. 
+ NullFields []string `json:"-"` + } + +@@ -12533,6 +12809,10 @@ type ForwardingRule struct { + // clients in the same region as the internal load balancer. + AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + ++ // AllowPscGlobalAccess: This is used in PSC consumer ForwardingRule to ++ // control whether the PSC endpoint can be accessed from another region. ++ AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` ++ + // BackendService: Identifies the backend service to which the + // forwarding rule sends traffic. Required for Internal TCP/UDP Load + // Balancing and Network Load Balancing; must be omitted for all other +@@ -12651,9 +12931,10 @@ type ForwardingRule struct { + // Network: This field is not used for external load balancing. For + // Internal TCP/UDP Load Balancing, this field identifies the network + // that the load balanced IP should belong to for this Forwarding Rule. +- // If this field is not specified, the default network will be used. For +- // Private Service Connect forwarding rules that forward traffic to +- // Google APIs, a network must be provided. ++ // If the subnetwork is specified, the network of the subnetwork will be ++ // used. If neither subnetwork nor this field is specified, the default ++ // network will be used. For Private Service Connect forwarding rules ++ // that forward traffic to Google APIs, a network must be provided. + Network string `json:"network,omitempty"` + + // NetworkTier: This signifies the networking tier used for configuring +@@ -13511,6 +13792,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type GlobalAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project ++ // /global/addresses/address - projects/project/global/addresses/address ++ // Note that destination project must be different from the source ++ // project. So /global/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. 
++ NullFields []string `json:"-"` ++} ++ ++func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod GlobalAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` +@@ -13800,8 +14118,8 @@ type GuestOsFeature struct { + // commas to separate values. Set to one or more of the following + // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - + // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - +- // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling +- // guest operating system features. ++ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see ++ // Enabling guest operating system features. + // + // Possible values: + // "FEATURE_TYPE_UNSPECIFIED" +@@ -13809,6 +14127,7 @@ type GuestOsFeature struct { + // "MULTI_IP_SUBNET" + // "SECURE_BOOT" + // "SEV_CAPABLE" ++ // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" + // "UEFI_COMPATIBLE" + // "VIRTIO_SCSI_MULTIQUEUE" +@@ -14126,12 +14445,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { + // (/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) + // load balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Traffic Director must use global +-// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load ++// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load + // balancers can use either regional or global health checks +-// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). ++// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). + // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + // managed instance group auto-healing must use global health checks +-// (`compute.v1.HealthChecks`). Backend service-based network load ++// (`compute.v1.healthChecks`). Backend service-based network load + // balancers must use regional health checks + // (`compute.v1.regionHealthChecks`). Target pool-based network load + // balancers must use legacy HTTP health checks +@@ -15209,7 +15528,7 @@ type HealthStatus struct { + // instance. + ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` + +- // HealthState: Health state of the instance. ++ // HealthState: Health state of the IPv4 address of the instance. + // + // Possible values: + // "HEALTHY" +@@ -15294,10 +15613,10 @@ type HealthStatusForNetworkEndpoint struct { + // the health checks configured. + // + // Possible values: +- // "DRAINING" +- // "HEALTHY" +- // "UNHEALTHY" +- // "UNKNOWN" ++ // "DRAINING" - Endpoint is being drained. ++ // "HEALTHY" - Endpoint is healthy. ++ // "UNHEALTHY" - Endpoint is unhealthy. ++ // "UNKNOWN" - Health status of the endpoint is unknown. + HealthState string `json:"healthState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to +@@ -16450,6 +16769,15 @@ type HttpRouteRuleMatch struct { + // validateForProxyless field set to true. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + ++ // PathTemplateMatch: If specified, the route is a pattern match ++ // expression that must match the :path header once the query string is ++ // removed. 
A pattern match allows you to match - The value must be ++ // between 1 and 1024 characters - The pattern must start with a leading ++ // slash ("/") - There may be no more than 5 operators in pattern ++ // Precisely one of prefix_match, full_path_match, regex_match or ++ // path_template_match must be set. ++ PathTemplateMatch string `json:"pathTemplateMatch,omitempty"` ++ + // PrefixMatch: For satisfying the matchRule condition, the request's + // path must begin with the specified prefixMatch. prefixMatch must + // begin with a /. The value must be from 1 to 1024 characters. Only one +@@ -17520,9 +17848,9 @@ type Instance struct { + // cycle. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -18775,13 +19103,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. + HealthCheck string `json:"healthCheck,omitempty"` + +- // InitialDelaySec: The number of seconds that the managed instance +- // group waits before it applies autohealing policies to new instances +- // or recently recreated instances. This initial delay allows instances +- // to initialize and run their startup scripts before the instance group +- // determines that they are UNHEALTHY. This prevents the managed +- // instance group from recreating its instances prematurely. This value +- // must be from range [0, 3600]. ++ // InitialDelaySec: The initial delay is the number of seconds that a ++ // new VM takes to initialize and run its startup script. During a VM's ++ // initial delay period, the MIG ignores unsuccessful health checks ++ // because the VM might be in the startup process. This prevents the MIG ++ // from prematurely recreating a VM. If the health check receives a ++ // healthy response during the initial delay, it indicates that the ++ // startup process is complete and the VM is ready. The value of initial ++ // delay must be between 0 and 3600 seconds. The default value is 0. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to +@@ -19452,7 +19781,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err + type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as +- // zones/[ZONE]/instances/[INSTANCE_NAME]. ++ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have ++ // URL and can be deleted only by name. One cannot specify both URLs and ++ // names in a single request. + Instances []string `json:"instances,omitempty"` + + // SkipInstancesOnValidationError: Specifies whether the request should +@@ -22010,9 +22341,9 @@ type InstanceWithNamedPorts struct { + // Status: [Output Only] The status of the instance. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. 
++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -22604,9 +22935,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + } + + // Interconnect: Represents an Interconnect resource. An Interconnect +-// resource is a dedicated connection between the GCP network and your +-// on-premises network. For more information, read the Dedicated +-// Interconnect Overview. ++// resource is a dedicated connection between the Google Cloud network ++// and your on-premises network. For more information, read the ++// Dedicated Interconnect Overview. + type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to true, the Interconnect is functional and can carry traffic. +@@ -22671,6 +23002,21 @@ type Interconnect struct { + // for interconnects. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // Interconnect, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. ++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve an Interconnect. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // LinkType: Type of link requested, which can take one of the following + // values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - + // LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that +@@ -22731,6 +23077,11 @@ type Interconnect struct { + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + ++ // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. ++ // This field specifies the location outside of Google's network that ++ // the interconnect is connected to. ++ RemoteLocation string `json:"remoteLocation,omitempty"` ++ + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` +@@ -22846,6 +23197,11 @@ type InterconnectAttachment struct { + // CloudRouterIpv6InterfaceId: This field is not available. + CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + ++ // ConfigurationConstraints: [Output Only] Constraints for this ++ // attachment, if any. The attachment does not work if these constraints ++ // are not met. ++ ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` ++ + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` +@@ -22938,14 +23294,28 @@ type InterconnectAttachment struct { + // attachment. 
If this field is not specified when creating the VLAN + // attachment, then later on when creating an HA VPN gateway on this + // VLAN attachment, the HA VPN gateway's IP address is allocated from +- // the regional external IP address pool. Not currently available +- // publicly. ++ // the regional external IP address pool. + IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectAttachment for interconnect attachments. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // InterconnectAttachment, which is essentially a hash of the labels set ++ // used for optimistic locking. The fingerprint is initially generated ++ // by Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve an InterconnectAttachment. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Mtu: Maximum Transmission Unit (MTU), in bytes, of packets passing + // through this interconnect attachment. Only 1440 and 1500 are allowed. + // If not specified, the value will default to 1440. +@@ -23001,6 +23371,14 @@ type InterconnectAttachment struct { + // body. + Region string `json:"region,omitempty"` + ++ // RemoteService: [Output Only] If the attachment is on a Cross-Cloud ++ // Interconnect connection, this field contains the interconnect's ++ // remote location service provider. Example values: "Amazon Web ++ // Services" "Microsoft Azure". The field is set only for attachments on ++ // Cross-Cloud Interconnect connections. Its value is copied from the ++ // InterconnectRemoteLocation remoteService field. ++ RemoteService string `json:"remoteService,omitempty"` ++ + // Router: URL of the Cloud Router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to +@@ -23064,6 +23442,16 @@ type InterconnectAttachment struct { + // yet, because turnup is not complete. + State string `json:"state,omitempty"` + ++ // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 ++ // (default) - 30 The default value is 29, except for Cross-Cloud ++ // Interconnect connections that use an InterconnectRemoteLocation with ++ // a constraints.subnetLengthRange.min equal to 30. For example, ++ // connections that use an Azure remote location fall into this ++ // category. In these cases, the default value is 30, and requesting 29 ++ // returns an error. Where both 29 and 30 are allowed, 29 is preferred, ++ // because it gives Google Cloud Support more debugging visibility. ++ SubnetLength int64 `json:"subnetLength,omitempty"` ++ + // Type: The type of interconnect attachment this is, which can take one + // of the following values: - DEDICATED: an attachment to a Dedicated + // Interconnect. 
- PARTNER: an attachment to a Partner Interconnect, +@@ -23302,6 +23690,87 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type InterconnectAttachmentConfigurationConstraints struct { ++ // BgpMd5: [Output Only] Whether the attachment's BGP session ++ // requires/allows/disallows BGP MD5 authentication. This can take one ++ // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. ++ // For example, a Cross-Cloud Interconnect connection to a remote cloud ++ // provider that requires BGP MD5 authentication has the ++ // interconnectRemoteLocation ++ // attachment_configuration_constraints.bgp_md5 field set to ++ // MD5_REQUIRED, and that property is propagated to the attachment. ++ // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 ++ // is requested. ++ // ++ // Possible values: ++ // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported ++ // and can optionally be configured. ++ // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be ++ // configured. ++ // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must ++ // not be configured ++ BgpMd5 string `json:"bgpMd5,omitempty"` ++ ++ // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote ++ // location is known to support. Formatted as an array of inclusive ++ // ranges {min: min-value, max: max-value}. For example, [{min: 123, ++ // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or ++ // anything in the range 64512-65534. This field is only advisory. ++ // Although the API accepts other ranges, these are the ranges that we ++ // recommend. ++ BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "BgpMd5") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "BgpMd5") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectAttachmentList: Response to the list request, and + // contains a list of interconnect attachments. + type InterconnectAttachmentList struct { +@@ -24690,6 +25159,468 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect ++// Remote Location resource. You can use this resource to find remote ++// location details about an Interconnect attachment (VLAN). ++type InterconnectRemoteLocation struct { ++ // Address: [Output Only] The postal address of the Point of Presence, ++ // each line in the address is separated by a newline character. ++ Address string `json:"address,omitempty"` ++ ++ // AttachmentConfigurationConstraints: [Output Only] Subset of fields ++ // from InterconnectAttachment's |configurationConstraints| field that ++ // apply to all attachments for this remote location. ++ AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` ++ ++ // City: [Output Only] Metropolitan area designator that indicates which ++ // city an interconnect is located. For example: "Chicago, IL", ++ // "Amsterdam, Netherlands". ++ City string `json:"city,omitempty"` ++ ++ // Constraints: [Output Only] Constraints on the parameters for creating ++ // Cross-Cloud Interconnect and associated InterconnectAttachments. ++ Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` ++ ++ // Continent: [Output Only] Continent for this location, which can take ++ // one of the following values: - AFRICA - ASIA_PAC - EUROPE - ++ // NORTH_AMERICA - SOUTH_AMERICA ++ // ++ // Possible values: ++ // "AFRICA" ++ // "ASIA_PAC" ++ // "EUROPE" ++ // "NORTH_AMERICA" ++ // "SOUTH_AMERICA" ++ Continent string `json:"continent,omitempty"` ++ ++ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text ++ // format. ++ CreationTimestamp string `json:"creationTimestamp,omitempty"` ++ ++ // Description: [Output Only] An optional description of the resource. ++ Description string `json:"description,omitempty"` ++ ++ // FacilityProvider: [Output Only] The name of the provider for this ++ // facility (e.g., EQUINIX). ++ FacilityProvider string `json:"facilityProvider,omitempty"` ++ ++ // FacilityProviderFacilityId: [Output Only] A provider-assigned ++ // Identifier for this facility (e.g., Ashburn-DC1). 
++ FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` ++ ++ // Id: [Output Only] The unique identifier for the resource. This ++ // identifier is defined by the server. ++ Id uint64 `json:"id,omitempty,string"` ++ ++ // Kind: [Output Only] Type of the resource. Always ++ // compute#interconnectRemoteLocation for interconnect remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) ++ // constraints, which can take one of the following values: ++ // LACP_SUPPORTED, LACP_UNSUPPORTED ++ // ++ // Possible values: ++ // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled ++ // by default on the Cross-Cloud Interconnect. ++ // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is ++ // not be enabled on this port. GetDiagnostics shows ++ // bundleAggregationType as "static". GCP does not support LAGs without ++ // LACP, so requestedLinkCount must be 1. ++ Lacp string `json:"lacp,omitempty"` ++ ++ // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 100 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. ++ MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` ++ ++ // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports ++ // supported in a link aggregation group (LAG). When linkType is 10 ++ // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. ++ MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` ++ ++ // Name: [Output Only] Name of the resource. ++ Name string `json:"name,omitempty"` ++ ++ // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this ++ // facility (corresponding with a netfac type in peeringdb). ++ PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` ++ ++ // PermittedConnections: [Output Only] Permitted connections. ++ PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` ++ ++ // RemoteService: [Output Only] Indicates the service provider present ++ // at the remote location. Example values: "Amazon Web Services", ++ // "Microsoft Azure". ++ RemoteService string `json:"remoteService,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for the resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Status: [Output Only] The status of this InterconnectRemoteLocation, ++ // which can take one of the following values: - CLOSED: The ++ // InterconnectRemoteLocation is closed and is unavailable for ++ // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The ++ // InterconnectRemoteLocation is available for provisioning new ++ // Cross-Cloud Interconnects. ++ // ++ // Possible values: ++ // "AVAILABLE" - The InterconnectRemoteLocation is available for ++ // provisioning new Cross-Cloud Interconnects. ++ // "CLOSED" - The InterconnectRemoteLocation is closed for ++ // provisioning new Cross-Cloud Interconnects. ++ Status string `json:"status,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Address") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Address") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocation ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraints struct { ++ // PortPairRemoteLocation: [Output Only] Port pair remote location ++ // constraints, which can take one of the following values: ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to ++ // individual ports, but the UI uses this field when ordering a pair of ++ // ports, to prevent users from accidentally ordering something that is ++ // incompatible with their cloud provider. Specifically, when ordering a ++ // redundant pair of Cross-Cloud Interconnect ports, and one of them ++ // uses a remote location with portPairMatchingRemoteLocation set to ++ // matching, the UI requires that both ports use the same remote ++ // location. ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If ++ // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider ++ // allocates ports in pairs, and the user should choose the same remote ++ // location for both ports. ++ // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If ++ // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision ++ // a redundant pair of Cross-Cloud Interconnects using two different ++ // remote locations in the same city. ++ PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` ++ ++ // PortPairVlan: [Output Only] Port pair VLAN constraints, which can ++ // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, ++ // PORT_PAIR_MATCHING_VLAN ++ // ++ // Possible values: ++ // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the ++ // Interconnect for this attachment is part of a pair of ports that ++ // should have matching VLAN allocations. This occurs with Cross-Cloud ++ // Interconnect to Azure remote locations. While GCP's API does not ++ // explicitly group pairs of ports, the UI uses this field to ensure ++ // matching VLAN ids when configuring a redundant VLAN pair. ++ // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means ++ // there is no constraint. ++ PortPairVlan string `json:"portPairVlan,omitempty"` ++ ++ // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum ++ // and maximum value (inclusive) for the IPv4 subnet length. For ++ // example, an interconnectRemoteLocation for Azure has {min: 30, max: ++ // 30} because Azure requires /30 subnets. This range specifies the ++ // values supported by both cloud providers. Interconnect currently ++ // supports /29 and /30 IPv4 subnet lengths. 
If a remote cloud has no ++ // constraint on IPv4 subnet length, the range would thus be {min: 29, ++ // max: 30}. ++ SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "PortPairRemoteLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PortPairRemoteLocation") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraints ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { ++ Max int64 `json:"max,omitempty"` ++ ++ Min int64 `json:"min,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Max") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Max") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationList: Response to the list request, and ++// contains a list of interconnect remote locations. ++type InterconnectRemoteLocationList struct { ++ // Id: [Output Only] Unique identifier for the resource; defined by the ++ // server. ++ Id string `json:"id,omitempty"` ++ ++ // Items: A list of InterconnectRemoteLocation resources. ++ Items []*InterconnectRemoteLocation `json:"items,omitempty"` ++ ++ // Kind: [Output Only] Type of resource. Always ++ // compute#interconnectRemoteLocationList for lists of interconnect ++ // remote locations. ++ Kind string `json:"kind,omitempty"` ++ ++ // NextPageToken: [Output Only] This token lets you get the next page of ++ // results for list requests. 
If the number of results is larger than ++ // maxResults, use the nextPageToken as a value for the query parameter ++ // pageToken in the next list request. Subsequent list requests will ++ // have their own nextPageToken to continue paging through the results. ++ NextPageToken string `json:"nextPageToken,omitempty"` ++ ++ // SelfLink: [Output Only] Server-defined URL for this resource. ++ SelfLink string `json:"selfLink,omitempty"` ++ ++ // Warning: [Output Only] Informational warning message. ++ Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. ++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Id") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Id") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationList ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++// InterconnectRemoteLocationListWarning: [Output Only] Informational ++// warning message. ++type InterconnectRemoteLocationListWarning struct { ++ // Code: [Output Only] A warning code, if applicable. For example, ++ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in ++ // the response. ++ // ++ // Possible values: ++ // "CLEANUP_FAILED" - Warning about failed cleanup of transient ++ // changes made by a failed operation. ++ // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was ++ // created. ++ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as deprecated ++ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk ++ // that is larger than image size. ++ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the ++ // resources has a type marked as experimental ++ // "EXTERNAL_API_WARNING" - Warning that is present in an external api ++ // call ++ // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been ++ // overridden. Deprecated unused field. ++ // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an ++ // injected kernel, which is deprecated. ++ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV ++ // backend service is associated with a health check that is not of type ++ // HTTP/HTTPS/HTTP2. ++ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a ++ // exceedingly large number of resources ++ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type ++ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is ++ // not assigned to an instance on the network. 
++ // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ++ // ip forward. ++ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's ++ // nextHopInstance URL refers to an instance that does not have an ipv6 ++ // interface on the same network as the route. ++ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL ++ // refers to an instance that does not exist. ++ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance ++ // URL refers to an instance that is not on the same network as the ++ // route. ++ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not ++ // have a status of RUNNING. ++ // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to ++ // continue the process despite the mentioned error. ++ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list ++ // page. ++ // "PARTIAL_SUCCESS" - Success is reported, but some results may be ++ // missing due to errors ++ // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource ++ // that requires a TOS they have not accepted. ++ // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a ++ // resource is in use. ++ // "RESOURCE_NOT_DELETED" - One or more of the resources set to ++ // auto-delete could not be deleted because they were in use. ++ // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is ++ // ignored. ++ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in ++ // instance group manager is valid as such, but its application does not ++ // make a lot of sense, because it allows only single instance in ++ // instance group. ++ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema ++ // are present ++ // "UNREACHABLE" - A given scope cannot be reached. ++ Code string `json:"code,omitempty"` ++ ++ // Data: [Output Only] Metadata about this warning in key: value format. ++ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" ++ // } ++ Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` ++ ++ // Message: [Output Only] A human-readable description of the warning ++ // code. ++ Message string `json:"message,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Code") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Code") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationListWarningData struct { ++ // Key: [Output Only] A key that provides more detail on the warning ++ // being returned. 
For example, for warnings where there are no results ++ // in a list request for a particular zone, this key might be scope and ++ // the key value might be the zone name. Other examples might be a key ++ // indicating a deprecated resource and a suggested replacement, or a ++ // warning about invalid network settings (for example, if an instance ++ // attempts to perform IP forwarding but is not enabled for IP ++ // forwarding). ++ Key string `json:"key,omitempty"` ++ ++ // Value: [Output Only] A warning data value corresponding to the key. ++ Value string `json:"value,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationListWarningData ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ ++type InterconnectRemoteLocationPermittedConnections struct { ++ // InterconnectLocation: [Output Only] URL of an Interconnect location ++ // that is permitted to connect to this Interconnect remote location. ++ InterconnectLocation string `json:"interconnectLocation,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "InterconnectLocation") to unconditionally include in API requests. ++ // By default, fields with empty or default values are omitted from API ++ // requests. However, any non-pointer, non-interface field appearing in ++ // ForceSendFields will be sent to the server regardless of whether the ++ // field is empty or not. This may be used to include empty fields in ++ // Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "InterconnectLocation") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { ++ type NoMethod InterconnectRemoteLocationPermittedConnections ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // InterconnectsGetDiagnosticsResponse: Response for the + // InterconnectsGetDiagnosticsRequest. 
+ type InterconnectsGetDiagnosticsResponse struct { +@@ -25936,7 +26867,7 @@ type MachineTypeAccelerators struct { + GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` + + // GuestAcceleratorType: The accelerator type resource name, not a full +- // URL, e.g. 'nvidia-tesla-k80'. ++ // URL, e.g. nvidia-tesla-t4. + GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. +@@ -26618,9 +27549,9 @@ type ManagedInstance struct { + // is empty when the instance does not exist. + // + // Possible values: +- // "DEPROVISIONING" - The Nanny is halted and we are performing tear +- // down tasks like network deprogramming, releasing quota, IP, tearing +- // down disks etc. ++ // "DEPROVISIONING" - The instance is halted and we are performing ++ // tear down tasks like network deprogramming, releasing quota, IP, ++ // tearing down disks etc. + // "PROVISIONING" - Resources are being allocated for the instance. + // "REPAIRING" - The instance is in repair. + // "RUNNING" - The instance is running. +@@ -27130,7 +28061,7 @@ type Network struct { + FirewallPolicy string `json:"firewallPolicy,omitempty"` + + // GatewayIPv4: [Output Only] The gateway address for default routing +- // out of the network, selected by GCP. ++ // out of the network, selected by Google Cloud. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This +@@ -27242,10 +28173,9 @@ type NetworkAttachment struct { + // property when you create the resource. + Description string `json:"description,omitempty"` + +- // Fingerprint: [Output Only] Fingerprint of this resource. A hash of +- // the contents stored in this object. This field is used in optimistic +- // locking. An up-to-date fingerprint must be provided in order to +- // patch. ++ // Fingerprint: Fingerprint of this resource. A hash of the contents ++ // stored in this object. This field is used in optimistic locking. An ++ // up-to-date fingerprint must be provided in order to patch. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource type. The +@@ -27265,7 +28195,11 @@ type NetworkAttachment struct { + Name string `json:"name,omitempty"` + + // Network: [Output Only] The URL of the network which the Network +- // Attachment belongs to. ++ // Attachment belongs to. Practically it is inferred by fetching the ++ // network of the first subnetwork associated. Because it is required ++ // that all the subnetworks must be from the same network, it is assured ++ // that the Network Attachment belongs to the same network as all the ++ // subnetworks. + Network string `json:"network,omitempty"` + + // ProducerAcceptLists: Projects that are allowed to connect to this +@@ -27516,7 +28450,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro + // NetworkAttachmentConnectedEndpoint: [Output Only] A connection + // connected to this network attachment. + type NetworkAttachmentConnectedEndpoint struct { +- // IpAddress: The IP address assigned to the producer instance network ++ // IpAddress: The IPv4 address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + +@@ -27524,7 +28458,7 @@ type NetworkAttachmentConnectedEndpoint struct { + // the IP was assigned. 
+ ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + +- // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork ++ // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network +@@ -29708,10 +30642,11 @@ type NetworkInterface struct { + // number. It'll be empty if not specified by the users. + QueueCount int64 `json:"queueCount,omitempty"` + +- // StackType: The stack type for this network interface to identify +- // whether the IPv6 feature is enabled or not. If not specified, +- // IPV4_ONLY will be used. This field can be both set at instance +- // creation and update network interface operations. ++ // StackType: The stack type for this network interface. To assign only ++ // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 ++ // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This ++ // field can be both set at instance creation and update network ++ // interface operations. + // + // Possible values: + // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 +@@ -31398,6 +32333,33 @@ func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type NodeGroupsSimulateMaintenanceEventRequest struct { ++ // Nodes: Names of the nodes to go under maintenance simulation. ++ Nodes []string `json:"nodes,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Nodes") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Nodes") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod NodeGroupsSimulateMaintenanceEventRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NodeTemplate: Represent a sole-tenant Node Template resource. You can + // use a template to define properties for nodes in a node group. For + // more information, read Creating node groups and instances. 
+@@ -36809,6 +37771,7 @@ type Quota struct { + // "COMMITTED_NVIDIA_A100_80GB_GPUS" + // "COMMITTED_NVIDIA_A100_GPUS" + // "COMMITTED_NVIDIA_K80_GPUS" ++ // "COMMITTED_NVIDIA_L4_GPUS" + // "COMMITTED_NVIDIA_P100_GPUS" + // "COMMITTED_NVIDIA_P4_GPUS" + // "COMMITTED_NVIDIA_T4_GPUS" +@@ -36860,11 +37823,15 @@ type Quota struct { + // "NETWORK_ATTACHMENTS" + // "NETWORK_ENDPOINT_GROUPS" + // "NETWORK_FIREWALL_POLICIES" ++ // "NET_LB_SECURITY_POLICIES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" ++ // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" + // "NODE_GROUPS" + // "NODE_TEMPLATES" + // "NVIDIA_A100_80GB_GPUS" + // "NVIDIA_A100_GPUS" + // "NVIDIA_K80_GPUS" ++ // "NVIDIA_L4_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" +@@ -36879,6 +37846,7 @@ type Quota struct { + // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" + // "PREEMPTIBLE_NVIDIA_A100_GPUS" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" ++ // "PREEMPTIBLE_NVIDIA_L4_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" +@@ -36902,6 +37870,7 @@ type Quota struct { + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICIES_PER_REGION" ++ // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" + // "SECURITY_POLICY_CEVAL_RULES" + // "SECURITY_POLICY_RULES" + // "SECURITY_POLICY_RULES_PER_REGION" +@@ -37141,6 +38110,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type RegionAddressesMoveRequest struct { ++ // Description: An optional destination address description if intended ++ // to be different from the source. ++ Description string `json:"description,omitempty"` ++ ++ // DestinationAddress: The URL of the destination address to move to. ++ // This can be a full or partial URL. For example, the following are all ++ // valid URLs to a address: - ++ // https://www.googleapis.com/compute/v1/projects/project/regions/region ++ // /addresses/address - ++ // projects/project/regions/region/addresses/address Note that ++ // destination project must be different from the source project. So ++ // /regions/region/addresses/address is not valid partial url. ++ DestinationAddress string `json:"destinationAddress,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Description") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Description") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { ++ type NoMethod RegionAddressesMoveRequest ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // RegionAutoscalerList: Contains a list of autoscalers. 
+ type RegionAutoscalerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the +@@ -42782,10 +43789,9 @@ type RouterNat struct { + // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list + // of Subnetworks are allowed to Nat (specified in the field subnetwork + // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. +- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or +- // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any +- // other Router.Nat section in any Router for this network in this +- // region. ++ // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then ++ // there should not be any other Router.Nat section in any Router for ++ // this network in this region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every +@@ -44462,6 +45468,20 @@ type SecurityPolicy struct { + // compute#securityPolicyfor security policies + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // security policy, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels. To see the latest fingerprint, make ++ // get() request to the security policy. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -44576,13 +45596,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { + } + + // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: +-// Configuration options for L7 DDoS detection. ++// Configuration options for L7 DDoS detection. This field is only ++// supported in Global Security Policies of type CLOUD_ARMOR. + type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { +- // Enable: If set to true, enables CAAP for L7 DDoS detection. ++ // Enable: If set to true, enables CAAP for L7 DDoS detection. This ++ // field is only supported in Global Security Policies of type ++ // CLOUD_ARMOR. + Enable bool `json:"enable,omitempty"` + + // RuleVisibility: Rule visibility can be one of the following: STANDARD +- // - opaque rules. (default) PREMIUM - transparent rules. ++ // - opaque rules. (default) PREMIUM - transparent rules. This field is ++ // only supported in Global Security Policies of type CLOUD_ARMOR. + // + // Possible values: + // "PREMIUM" +@@ -44905,7 +45929,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { + // GOOGLE_RECAPTCHA under the security policy. The specified site key + // needs to be created from the reCAPTCHA API. The user is responsible + // for the validity of the specified site key. If not specified, a +- // Google-managed site key is used. ++ // Google-managed site key is used. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. 
+ RedirectSiteKey string `json:"redirectSiteKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to +@@ -44973,10 +45998,11 @@ type SecurityPolicyRule struct { + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this +- // action can be configured via redirectOptions. - throttle: limit +- // client traffic to the configured threshold. Configure parameters for +- // this action in rateLimitOptions. Requires rate_limit_options to be +- // set for this. ++ // action can be configured via redirectOptions. This action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. - ++ // throttle: limit client traffic to the configured threshold. Configure ++ // parameters for this action in rateLimitOptions. Requires ++ // rate_limit_options to be set for this. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this +@@ -44984,7 +46010,8 @@ type SecurityPolicyRule struct { + Description string `json:"description,omitempty"` + + // HeaderAction: Optional, additional actions that are performed on +- // headers. ++ // headers. This field is only supported in Global Security Policies of ++ // type CLOUD_ARMOR. + HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` + + // Kind: [Output only] Type of the resource. Always +@@ -45015,7 +46042,8 @@ type SecurityPolicyRule struct { + RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` + + // RedirectOptions: Parameters defining the redirect action. Cannot be +- // specified for any other actions. ++ // specified for any other actions. This field is only supported in ++ // Global Security Policies of type CLOUD_ARMOR. + RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the +@@ -45115,7 +46143,13 @@ type SecurityPolicyRuleMatcher struct { + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and +- // contents in the request header. ++ // contents in the request header. Expressions containing ++ // `evaluateThreatIntelligence` require Cloud Armor Managed Protection ++ // Plus tier and are not supported in Edge Policies nor in Regional ++ // Policies. Expressions containing ++ // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor ++ // Managed Protection Plus tier and are only supported in Global ++ // Security Policies. + Expr *Expr `json:"expr,omitempty"` + + // VersionedExpr: Preconfigured versioned expression. If this field is +@@ -45354,6 +46388,13 @@ type SecurityPolicyRuleRateLimitOptions struct { + // "XFF_IP" + EnforceOnKey string `json:"enforceOnKey,omitempty"` + ++ // EnforceOnKeyConfigs: If specified, any combination of values of ++ // enforce_on_key_type/enforce_on_key_name is treated as the key on ++ // which ratelimit threshold/action is enforced. You can specify up to 3 ++ // enforce_on_key_configs. If enforce_on_key_configs is specified, ++ // enforce_on_key must not be specified. 
++ EnforceOnKeyConfigs []*SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig `json:"enforceOnKeyConfigs,omitempty"` ++ + // EnforceOnKeyName: Rate limit key name applicable only for the + // following key types: HTTP_HEADER -- Name of the HTTP header whose + // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP +@@ -45365,12 +46406,14 @@ type SecurityPolicyRuleRateLimitOptions struct { + // response code, or redirect to a different endpoint. Valid options are + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from +- // `exceedRedirectOptions` below. ++ // `exceedRedirectOptions` below. The `redirect` action is only ++ // supported in Global Security Policies of type CLOUD_ARMOR. + ExceedAction string `json:"exceedAction,omitempty"` + + // ExceedRedirectOptions: Parameters defining the redirect action that + // is used as the exceed action. Cannot be specified if the exceed +- // action is not redirect. ++ // action is not redirect. This field is only supported in Global ++ // Security Policies of type CLOUD_ARMOR. + ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` + + // RateLimitThreshold: Threshold at which to begin ratelimiting. +@@ -45400,6 +46443,71 @@ func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { ++ // EnforceOnKeyName: Rate limit key name applicable only for the ++ // following key types: HTTP_HEADER -- Name of the HTTP header whose ++ // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP ++ // cookie whose value is taken as the key value. ++ EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` ++ ++ // EnforceOnKeyType: Determines the key to enforce the ++ // rate_limit_threshold on. Possible values are: - ALL: A single rate ++ // limit threshold is applied to all the requests matching this rule. ++ // This is the default value if "enforceOnKeyConfigs" is not configured. ++ // - IP: The source IP address of the request is the key. Each IP has ++ // this limit enforced separately. - HTTP_HEADER: The value of the HTTP ++ // header whose name is configured under "enforceOnKeyName". The key ++ // value is truncated to the first 128 bytes of the header value. If no ++ // such header is present in the request, the key type defaults to ALL. ++ // - XFF_IP: The first IP address (i.e. the originating client IP ++ // address) specified in the list of IPs under X-Forwarded-For HTTP ++ // header. If no such header is present or the value is not a valid IP, ++ // the key defaults to the source IP address of the request i.e. key ++ // type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is ++ // configured under "enforceOnKeyName". The key value is truncated to ++ // the first 128 bytes of the cookie value. If no such cookie is present ++ // in the request, the key type defaults to ALL. - HTTP_PATH: The URL ++ // path of the HTTP request. The key value is truncated to the first 128 ++ // bytes. - SNI: Server name indication in the TLS session of the HTTPS ++ // request. The key value is truncated to the first 128 bytes. The key ++ // type defaults to ALL on a HTTP session. - REGION_CODE: The ++ // country/region from which the request originates. 
++ // ++ // Possible values: ++ // "ALL" ++ // "HTTP_COOKIE" ++ // "HTTP_HEADER" ++ // "HTTP_PATH" ++ // "IP" ++ // "REGION_CODE" ++ // "SNI" ++ // "XFF_IP" ++ EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "EnforceOnKeyName") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "EnforceOnKeyName") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + type SecurityPolicyRuleRateLimitOptionsThreshold struct { + // Count: Number of HTTP(S) requests for calculating the threshold. + Count int64 `json:"count,omitempty"` +@@ -45473,7 +46581,7 @@ type SecuritySettings struct { + // should authenticate with this service's backends. clientTlsPolicy + // only applies to a global BackendService with the loadBalancingScheme + // set to INTERNAL_SELF_MANAGED. If left blank, communications are not +- // encrypted. Note: This field currently has no impact. ++ // encrypted. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) +@@ -45488,8 +46596,7 @@ type SecuritySettings struct { + // Public Key Infrastructure which provisions server identities. Only + // applies to a global BackendService with loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. Only applies when BackendService has an +- // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: +- // This field currently has no impact. ++ // attached clientTlsPolicy with clientCertificate (mTLS mode). + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to +@@ -45638,7 +46745,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + // attachment represents a service that a producer has exposed. It + // encapsulates the load balancer which fronts the service runs and a + // list of NAT IP ranges that the producers uses to represent the +-// consumers connecting to the service. next tag = 20 ++// consumers connecting to the service. + type ServiceAttachment struct { + // ConnectedEndpoints: [Output Only] An array of connections for all the + // consumers connected to this service attachment. +@@ -45723,6 +46830,18 @@ type ServiceAttachment struct { + // the PSC service attachment. 
+ PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + ++ // ReconcileConnections: This flag determines whether a consumer ++ // accept/reject list change can reconcile the statuses of existing ++ // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy ++ // update will only affect existing PENDING PSC endpoints. Existing ++ // ACCEPTED/REJECTED endpoints will remain untouched regardless how the ++ // connection policy is modified . - If true, update will affect both ++ // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED ++ // PSC endpoint will be moved to REJECTED if its project is added to the ++ // reject list. For newly created service attachment, this boolean ++ // defaults to true. ++ ReconcileConnections bool `json:"reconcileConnections,omitempty"` ++ + // Region: [Output Only] URL of the region where the service attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not +@@ -48933,8 +50052,8 @@ type Subnetwork struct { + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is determined by the org + // policy, if there is no org policy specified, then it will default to +- // disabled. This field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // disabled. This field isn't supported if the subnet purpose field is ++ // set to REGIONAL_MANAGED_PROXY. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // ExternalIpv6Prefix: The external IPv6 address range that is owned by +@@ -49027,12 +50146,20 @@ type Subnetwork struct { + PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -49051,9 +50178,9 @@ type Subnetwork struct { + Region string `json:"region,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. 
An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -49528,6 +50655,8 @@ type SubnetworkLogConfig struct { + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is determined by the org policy, if + // there is no org policy specified, then it will default to disabled. ++ // Flow logging isn't supported if the subnet purpose field is set to ++ // REGIONAL_MANAGED_PROXY. + Enable bool `json:"enable,omitempty"` + + // FilterExpr: Can only be specified if VPC flow logs for this +@@ -50992,7 +52121,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) + + type TargetHttpsProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetHttpsProxy. ++ // TargetHttpsProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CertificateMap") to +@@ -51109,7 +52240,9 @@ type TargetHttpsProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -51187,9 +52320,11 @@ type TargetHttpsProxy struct { + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. serverTlsPolicy only applies to + // a global TargetHttpsProxy attached to globalForwardingRules with the +- // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, +- // communications are not encrypted. Note: This field currently has no +- // impact. ++ // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or ++ // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are ++ // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, ++ // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy ++ // documentation. If left blank, communications are not encrypted. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to +@@ -53175,7 +54310,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) + + type TargetSslProxiesSetCertificateMapRequest struct { + // CertificateMap: URL of the Certificate Map to associate with this +- // TargetSslProxy. ++ // TargetSslProxy. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"CertificateMap") to +@@ -53273,7 +54410,9 @@ type TargetSslProxy struct { + // CertificateMap: URL of a certificate map that identifies a + // certificate map associated with the given target proxy. This field + // can only be set for global target proxies. If set, sslCertificates +- // will be ignored. ++ // will be ignored. Accepted format is ++ // //certificatemanager.googleapis.com/projects/{project ++ // }/locations/{location}/certificateMaps/{resourceName}. + CertificateMap string `json:"certificateMap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text +@@ -54268,6 +55407,21 @@ type TargetVpnGateway struct { + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // TargetVpnGateway, which is essentially a hash of the labels set used ++ // for optimistic locking. The fingerprint is initially generated by ++ // Compute Engine and changes after every request to modify or update ++ // labels. You must always provide an up-to-date fingerprint hash in ++ // order to update or change labels, otherwise the request will fail ++ // with error 412 conditionNotMet. To see the latest fingerprint, make a ++ // get() request to retrieve a TargetVpnGateway. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and +@@ -55980,6 +57134,22 @@ type UrlRewrite struct { + // characters. + PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + ++ // PathTemplateRewrite: If specified, the pattern rewrites the URL path ++ // (based on the :path header) using the HTTP template syntax. A ++ // corresponding path_template_match must be specified. Any template ++ // variables must exist in the path_template_match field. - -At least ++ // one variable must be specified in the path_template_match field - You ++ // can omit variables from the rewritten URL - The * and ** operators ++ // cannot be matched unless they have a corresponding variable name - ++ // e.g. {format=*} or {var=**}. For example, a path_template_match of ++ // /static/{format=**} could be rewritten as /static/content/{format} to ++ // prefix /content to the URL. Variables can also be re-ordered in a ++ // rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as ++ // /content/{format}/{country}/{suffix}. At least one non-empty ++ // routeRules[].matchRules[].path_template_match is required. Only one ++ // of path_prefix_rewrite or path_template_rewrite may be specified. ++ PathTemplateRewrite string `json:"pathTemplateRewrite,omitempty"` ++ + // ForceSendFields is a list of field names (e.g. "HostRewrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any +@@ -56033,12 +57203,20 @@ type UsableSubnetwork struct { + Network string `json:"network,omitempty"` + + // Purpose: The purpose of the resource. This field can be either +- // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. 
A subnetwork with +- // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created +- // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If +- // unspecified, the purpose defaults to PRIVATE_RFC_1918. The +- // enableFlowLogs field isn't supported with the purpose field set to +- // INTERNAL_HTTPS_LOAD_BALANCER. ++ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or ++ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for ++ // user-created subnets or subnets that are automatically created in ++ // auto mode networks. A subnet with purpose set to ++ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved ++ // for regional Envoy-based load balancers. A subnet with purpose set to ++ // PRIVATE_SERVICE_CONNECT is used to publish services using Private ++ // Service Connect. A subnet with purpose set to ++ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used ++ // only by regional internal HTTP(S) load balancers. Note that ++ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional ++ // Envoy load balancers. If unspecified, the subnet purpose defaults to ++ // PRIVATE. The enableFlowLogs field isn't supported if the subnet ++ // purpose field is set to REGIONAL_MANAGED_PROXY. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal +@@ -56053,9 +57231,9 @@ type UsableSubnetwork struct { + Purpose string `json:"purpose,omitempty"` + + // Role: The role of subnetwork. Currently, this field is only used when +- // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to +- // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being +- // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one ++ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or ++ // BACKUP. An ACTIVE subnetwork is one that is currently being used for ++ // Envoy-based load balancers in a region. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // +@@ -57284,7 +58462,7 @@ type VpnGatewayStatusTunnel struct { + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN +- // gateway or GCP VPN gateway. ++ // gateway or a Google Cloud VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. +@@ -57317,8 +58495,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + + // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN + // tunnels connected from this VpnGateway to the same peer gateway. The +-// peer gateway could either be a external VPN gateway or GCP VPN +-// gateway. ++// peer gateway could either be an external VPN gateway or a Google ++// Cloud VPN gateway. + type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This +@@ -57635,6 +58813,21 @@ type VpnTunnel struct { + // VPN tunnels. + Kind string `json:"kind,omitempty"` + ++ // LabelFingerprint: A fingerprint for the labels being applied to this ++ // VpnTunnel, which is essentially a hash of the labels set used for ++ // optimistic locking. The fingerprint is initially generated by Compute ++ // Engine and changes after every request to modify or update labels. 
++ // You must always provide an up-to-date fingerprint hash in order to ++ // update or change labels, otherwise the request will fail with error ++ // 412 conditionNotMet. To see the latest fingerprint, make a get() ++ // request to retrieve a VpnTunnel. ++ LabelFingerprint string `json:"labelFingerprint,omitempty"` ++ ++ // Labels: Labels for this resource. These can only be added or modified ++ // by the setLabels method. Each label key/value pair must comply with ++ // RFC1035. Label values may be empty. ++ Labels map[string]string `json:"labels,omitempty"` ++ + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with the peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges must be +@@ -60906,6 +62099,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro + } + } + ++// method id "compute.addresses.move": ++ ++type AddressesMoveCall struct { ++ s *Service ++ project string ++ region string ++ address string ++ regionaddressesmoverequest *RegionAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++// - region: Name of the region for this request. ++func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { ++ c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.region = region ++ c.address = address ++ c.regionaddressesmoverequest = regionaddressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *AddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "region": c.region, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.addresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource.", ++ // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.addresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "region", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "region": { ++ // "description": "Name of the region for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": 
"string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/regions/{region}/addresses/{address}/move", ++ // "request": { ++ // "$ref": "RegionAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.addresses.setLabels": + + type AddressesSetLabelsCall struct { +@@ -78150,6 +79531,183 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList + } + } + ++// method id "compute.globalAddresses.move": ++ ++type GlobalAddressesMoveCall struct { ++ s *Service ++ project string ++ address string ++ globaladdressesmoverequest *GlobalAddressesMoveRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Move: Moves the specified address resource from one project to ++// another project. ++// ++// - address: Name of the address resource to move. ++// - project: Source project ID which the Address is moved from. ++func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { ++ c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.address = address ++ c.globaladdressesmoverequest = globaladdressesmoverequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. 
Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *GlobalAddressesMoveCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "address": c.address, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.globalAddresses.move" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Moves the specified address resource from one project to another project.", ++ // "flatPath": "projects/{project}/global/addresses/{address}/move", ++ // "httpMethod": "POST", ++ // "id": "compute.globalAddresses.move", ++ // "parameterOrder": [ ++ // "project", ++ // "address" ++ // ], ++ // "parameters": { ++ // "address": { ++ // "description": "Name of the address resource to move.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Source project ID which the Address is moved from.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/addresses/{address}/move", ++ // "request": { ++ // "$ref": "GlobalAddressesMoveRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.globalAddresses.setLabels": + + type GlobalAddressesSetLabelsCall struct { +@@ -104089,164 +105647,6 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, + return c + } + +-// Fields allows partial responses to be retrieved. See +-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +-// for more information. +-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +- c.urlParams_.Set("fields", googleapi.CombineFields(s)) +- return c +-} +- +-// Context sets the context to be used in this call's Do method. Any +-// pending HTTP request will be aborted if the provided context is +-// canceled. 
+-func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +- c.ctx_ = ctx +- return c +-} +- +-// Header returns an http.Header that can be modified by the caller to +-// add HTTP headers to the request. +-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +- if c.header_ == nil { +- c.header_ = make(http.Header) +- } +- return c.header_ +-} +- +-func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +- reqHeaders := make(http.Header) +- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) +- for k, v := range c.header_ { +- reqHeaders[k] = v +- } +- reqHeaders.Set("User-Agent", c.s.userAgent()) +- var body io.Reader = nil +- c.urlParams_.Set("alt", alt) +- c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") +- urls += "?" + c.urlParams_.Encode() +- req, err := http.NewRequest("POST", urls, body) +- if err != nil { +- return nil, err +- } +- req.Header = reqHeaders +- googleapi.Expand(req.URL, map[string]string{ +- "project": c.project, +- "zone": c.zone, +- "instance": c.instance, +- }) +- return gensupport.SendRequest(c.ctx_, c.s.client, req) +-} +- +-// Do executes the "compute.instances.simulateMaintenanceEvent" call. +-// Exactly one of *Operation or error will be non-nil. Any non-2xx +-// status code is an error. Response headers are in either +-// *Operation.ServerResponse.Header or (if a response was returned at +-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +-// to check whether the returned error was because +-// http.StatusNotModified was returned. +-func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +- gensupport.SetOptions(c.urlParams_, opts...) +- res, err := c.doRequest("json") +- if res != nil && res.StatusCode == http.StatusNotModified { +- if res.Body != nil { +- res.Body.Close() +- } +- return nil, gensupport.WrapError(&googleapi.Error{ +- Code: res.StatusCode, +- Header: res.Header, +- }) +- } +- if err != nil { +- return nil, err +- } +- defer googleapi.CloseBody(res) +- if err := googleapi.CheckResponse(res); err != nil { +- return nil, gensupport.WrapError(err) +- } +- ret := &Operation{ +- ServerResponse: googleapi.ServerResponse{ +- Header: res.Header, +- HTTPStatusCode: res.StatusCode, +- }, +- } +- target := &ret +- if err := gensupport.DecodeResponse(target, res); err != nil { +- return nil, err +- } +- return ret, nil +- // { +- // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", +- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", +- // "httpMethod": "POST", +- // "id": "compute.instances.simulateMaintenanceEvent", +- // "parameterOrder": [ +- // "project", +- // "zone", +- // "instance" +- // ], +- // "parameters": { +- // "instance": { +- // "description": "Name of the instance scoping this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +- // "required": true, +- // "type": "string" +- // }, +- // "project": { +- // "description": "Project ID for this request.", +- // "location": "path", +- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +- // "required": true, +- // "type": "string" +- // }, +- // "zone": { +- // "description": "The name of the zone for this request.", +- // "location": "path", +- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +- // "required": true, +- // "type": "string" +- // } +- // }, +- // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", +- // "response": { +- // "$ref": "Operation" +- // }, +- // "scopes": [ +- // "https://www.googleapis.com/auth/cloud-platform", +- // "https://www.googleapis.com/auth/compute" +- // ] +- // } +- +-} +- +-// method id "compute.instances.start": +- +-type InstancesStartCall struct { +- s *Service +- project string +- zone string +- instance string +- urlParams_ gensupport.URLParams +- ctx_ context.Context +- header_ http.Header +-} +- +-// Start: Starts an instance that was stopped using the instances().stop +-// method. For more information, see Restart an instance. +-// +-// - instance: Name of the instance resource to start. +-// - project: Project ID for this request. +-// - zone: The name of the zone for this request. +-func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { +- c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +- c.project = project +- c.zone = zone +- c.instance = instance +- return c +-} +- + // RequestId sets the optional parameter "requestId": An optional + // request ID to identify requests. Specify a unique request ID so that + // if you must retry your request, the server will know to ignore the +@@ -104258,7 +105658,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * + // clients from accidentally creating duplicate commitments. The request + // ID must be a valid UUID with the exception that zero UUID is not + // supported ( 00000000-0000-0000-0000-000000000000). +-func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c + } +@@ -104266,7 +105666,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + // Fields allows partial responses to be retrieved. See + // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse + // for more information. 
+-func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c + } +@@ -104274,21 +105674,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + // Context sets the context to be used in this call's Do method. Any + // pending HTTP request will be aborted if the provided context is + // canceled. +-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c + } + + // Header returns an http.Header that can be modified by the caller to + // add HTTP headers to the request. +-func (c *InstancesStartCall) Header() http.Header { ++func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ + } + +-func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { +@@ -104298,7 +105698,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") +- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { +@@ -104313,14 +105713,193 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) + } + +-// Do executes the "compute.instances.start" call. ++// Do executes the "compute.instances.simulateMaintenanceEvent" call. + // Exactly one of *Operation or error will be non-nil. Any non-2xx + // status code is an error. Response headers are in either + // *Operation.ServerResponse.Header or (if a response was returned at + // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified + // to check whether the returned error was because + // http.StatusNotModified was returned. +-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", ++ // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "httpMethod": "POST", ++ // "id": "compute.instances.simulateMaintenanceEvent", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "instance" ++ // ], ++ // "parameters": { ++ // "instance": { ++ // "description": "Name of the instance scoping this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.instances.start": ++ ++type InstancesStartCall struct { ++ s *Service ++ project string ++ zone string ++ instance string ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Start: Starts an instance that was stopped using the instances().stop ++// method. For more information, see Restart an instance. ++// ++// - instance: Name of the instance resource to start. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. 
++func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { ++ c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.instance = instance ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InstancesStartCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "instance": c.instance, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.instances.start" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { +@@ -108151,6 +109730,449 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter + } + } + ++// method id "compute.interconnectRemoteLocations.get": ++ ++type InterconnectRemoteLocationsGetCall struct { ++ s *Service ++ project string ++ interconnectRemoteLocation string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// Get: Returns the details for the specified interconnect remote ++// location. Gets a list of available interconnect remote locations by ++// making a list() request. ++// ++// - interconnectRemoteLocation: Name of the interconnect remote ++// location to return. ++// - project: Project ID for this request. ++func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { ++ c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.interconnectRemoteLocation = interconnectRemoteLocation ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") ++ urls += "?" 
+ c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "interconnectRemoteLocation": c.interconnectRemoteLocation, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.get" call. ++// Exactly one of *InterconnectRemoteLocation or error will be non-nil. ++// Any non-2xx status code is an error. Response headers are in either ++// *InterconnectRemoteLocation.ServerResponse.Header or (if a response ++// was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.get", ++ // "parameterOrder": [ ++ // "project", ++ // "interconnectRemoteLocation" ++ // ], ++ // "parameters": { ++ // "interconnectRemoteLocation": { ++ // "description": "Name of the interconnect remote location to return.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// method id "compute.interconnectRemoteLocations.list": ++ ++type InterconnectRemoteLocationsListCall struct { ++ s *Service ++ project string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// List: Retrieves the list of interconnect remote locations available ++// to the specified project. ++// ++// - project: Project ID for this request. 
++func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { ++ c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ return c ++} ++ ++// Filter sets the optional parameter "filter": A filter expression that ++// filters resources listed in the response. Most Compute resources ++// support two types of filter expressions: expressions that support ++// regular expressions and expressions that follow API improvement ++// proposal AIP-160. If you want to use AIP-160, your expression must ++// specify the field name, an operator, and the value that you want to ++// use for filtering. The value must be a string, a number, or a ++// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` ++// or `:`. For example, if you are filtering Compute Engine instances, ++// you can exclude instances named `example-instance` by specifying ++// `name != example-instance`. The `:` operator can be used with string ++// fields to match substrings. For non-string fields it is equivalent to ++// the `=` operator. The `:*` comparison can be used to test whether a ++// key has been defined. For example, to find all objects with `owner` ++// label use: ``` labels.owner:* ``` You can also filter nested fields. ++// For example, you could specify `scheduling.automaticRestart = false` ++// to include instances only if they are not scheduled for automatic ++// restarts. You can use filtering on nested fields to filter based on ++// resource labels. To filter on multiple expressions, provide each ++// separate expression within parentheses. For example: ``` ++// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") ++// ``` By default, each expression is an `AND` expression. However, you ++// can include `AND` and `OR` expressions explicitly. For example: ``` ++// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") ++// AND (scheduling.automaticRestart = true) ``` If you want to use a ++// regular expression, use the `eq` (equal) or `ne` (not equal) operator ++// against a single un-parenthesized expression with or without quotes ++// or against multiple parenthesized expressions. Examples: `fieldname ++// eq unquoted literal` `fieldname eq 'single quoted literal'` ++// `fieldname eq "double quoted literal" `(fieldname1 eq literal) ++// (fieldname2 ne "literal")` The literal value is interpreted as a ++// regular expression using Google RE2 library syntax. The literal value ++// must match the entire field. For example, to filter for instances ++// that do not end with name "instance", you would use `name ne ++// .*instance`. ++func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("filter", filter) ++ return c ++} ++ ++// MaxResults sets the optional parameter "maxResults": The maximum ++// number of results per page that should be returned. If the number of ++// available results is larger than `maxResults`, Compute Engine returns ++// a `nextPageToken` that can be used to get the next page of results in ++// subsequent list requests. Acceptable values are `0` to `500`, ++// inclusive. (Default: `500`) ++func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) ++ return c ++} ++ ++// OrderBy sets the optional parameter "orderBy": Sorts list results by ++// a certain order. 
By default, results are returned in alphanumerical ++// order based on the resource name. You can also sort results in ++// descending order based on the creation timestamp using ++// `orderBy="creationTimestamp desc". This sorts results based on the ++// `creationTimestamp` field in reverse chronological order (newest ++// result first). Use this to sort resources like operations so that the ++// newest operation is returned first. Currently, only sorting by `name` ++// or `creationTimestamp desc` is supported. ++func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("orderBy", orderBy) ++ return c ++} ++ ++// PageToken sets the optional parameter "pageToken": Specifies a page ++// token to use. Set `pageToken` to the `nextPageToken` returned by a ++// previous list request to get the next page of results. ++func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("pageToken", pageToken) ++ return c ++} ++ ++// ReturnPartialSuccess sets the optional parameter ++// "returnPartialSuccess": Opt-in for partial success behavior which ++// provides partial results in case of failure. The default value is ++// false. ++func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. 
++func (c *InterconnectRemoteLocationsListCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.interconnectRemoteLocations.list" call. ++// Exactly one of *InterconnectRemoteLocationList or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a ++// response was returned at all) in error.(*googleapi.Error).Header. Use ++// googleapi.IsNotModified to check whether the returned error was ++// because http.StatusNotModified was returned. ++func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &InterconnectRemoteLocationList{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Retrieves the list of interconnect remote locations available to the specified project.", ++ // "flatPath": "projects/{project}/global/interconnectRemoteLocations", ++ // "httpMethod": "GET", ++ // "id": "compute.interconnectRemoteLocations.list", ++ // "parameterOrder": [ ++ // "project" ++ // ], ++ // "parameters": { ++ // "filter": { ++ // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "maxResults": { ++ // "default": "500", ++ // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", ++ // "format": "uint32", ++ // "location": "query", ++ // "minimum": "0", ++ // "type": "integer" ++ // }, ++ // "orderBy": { ++ // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "pageToken": { ++ // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "returnPartialSuccess": { ++ // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", ++ // "location": "query", ++ // "type": "boolean" ++ // } ++ // }, ++ // "path": "projects/{project}/global/interconnectRemoteLocations", ++ // "response": { ++ // "$ref": "InterconnectRemoteLocationList" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute", ++ // "https://www.googleapis.com/auth/compute.readonly" ++ // ] ++ // } ++ ++} ++ ++// Pages invokes f for each page of results. ++// A non-nil error returned from f will halt the iteration. ++// The provided context supersedes any context provided to the Context method. ++func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { ++ c.ctx_ = ctx ++ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point ++ for { ++ x, err := c.Do() ++ if err != nil { ++ return err ++ } ++ if err := f(x); err != nil { ++ return err ++ } ++ if x.NextPageToken == "" { ++ return nil ++ } ++ c.PageToken(x.NextPageToken) ++ } ++} ++ + // method id "compute.interconnects.delete": + + type InterconnectsDeleteCall struct { +@@ -125309,6 +127331,196 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera + + } + ++// method id "compute.nodeGroups.simulateMaintenanceEvent": ++ ++type NodeGroupsSimulateMaintenanceEventCall struct { ++ s *Service ++ project string ++ zone string ++ nodeGroup string ++ nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest ++ urlParams_ gensupport.URLParams ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// SimulateMaintenanceEvent: Simulates maintenance event on specified ++// nodes from the node group. ++// ++// - nodeGroup: Name of the NodeGroup resource whose nodes will go under ++// maintenance simulation. ++// - project: Project ID for this request. ++// - zone: The name of the zone for this request. ++func (r *NodeGroupsService) SimulateMaintenanceEvent(project string, zone string, nodeGroup string, nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest) *NodeGroupsSimulateMaintenanceEventCall { ++ c := &NodeGroupsSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.project = project ++ c.zone = zone ++ c.nodeGroup = nodeGroup ++ c.nodegroupssimulatemaintenanceeventrequest = nodegroupssimulatemaintenanceeventrequest ++ return c ++} ++ ++// RequestId sets the optional parameter "requestId": An optional ++// request ID to identify requests. Specify a unique request ID so that ++// if you must retry your request, the server will know to ignore the ++// request if it has already been completed. For example, consider a ++// situation where you make an initial request and the request times ++// out. If you make the request again with the same request ID, the ++// server can check if original operation with the same request ID was ++// received, and if so, will ignore the second request. This prevents ++// clients from accidentally creating duplicate commitments. The request ++// ID must be a valid UUID with the exception that zero UUID is not ++// supported ( 00000000-0000-0000-0000-000000000000). ++func (c *NodeGroupsSimulateMaintenanceEventCall) RequestId(requestId string) *NodeGroupsSimulateMaintenanceEventCall { ++ c.urlParams_.Set("requestId", requestId) ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. 
See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *NodeGroupsSimulateMaintenanceEventCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Context(ctx context.Context) *NodeGroupsSimulateMaintenanceEventCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *NodeGroupsSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ var body io.Reader = nil ++ body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssimulatemaintenanceeventrequest) ++ if err != nil { ++ return nil, err ++ } ++ reqHeaders.Set("Content-Type", "application/json") ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("POST", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "project": c.project, ++ "zone": c.zone, ++ "nodeGroup": c.nodeGroup, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "compute.nodeGroups.simulateMaintenanceEvent" call. ++// Exactly one of *Operation or error will be non-nil. Any non-2xx ++// status code is an error. Response headers are in either ++// *Operation.ServerResponse.Header or (if a response was returned at ++// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified ++// to check whether the returned error was because ++// http.StatusNotModified was returned. ++func (c *NodeGroupsSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) 
++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &Operation{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Simulates maintenance event on specified nodes from the node group.", ++ // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ // "httpMethod": "POST", ++ // "id": "compute.nodeGroups.simulateMaintenanceEvent", ++ // "parameterOrder": [ ++ // "project", ++ // "zone", ++ // "nodeGroup" ++ // ], ++ // "parameters": { ++ // "nodeGroup": { ++ // "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "project": { ++ // "description": "Project ID for this request.", ++ // "location": "path", ++ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", ++ // "required": true, ++ // "type": "string" ++ // }, ++ // "requestId": { ++ // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", ++ // "location": "query", ++ // "type": "string" ++ // }, ++ // "zone": { ++ // "description": "The name of the zone for this request.", ++ // "location": "path", ++ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", ++ // "request": { ++ // "$ref": "NodeGroupsSimulateMaintenanceEventRequest" ++ // }, ++ // "response": { ++ // "$ref": "Operation" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform", ++ // "https://www.googleapis.com/auth/compute" ++ // ] ++ // } ++ ++} ++ + // method id "compute.nodeGroups.testIamPermissions": + + type NodeGroupsTestIamPermissionsCall struct { +@@ -167365,6 +169577,15 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN + return c + } + ++// NatName sets the optional parameter "natName": Name of the nat ++// service to filter the Nat Mapping information. If it is omitted, all ++// nats for this router will be returned. Name should conform to ++// RFC1035. 
++func (c *RoutersGetNatMappingInfoCall) NatName(natName string) *RoutersGetNatMappingInfoCall { ++ c.urlParams_.Set("natName", natName) ++ return c ++} ++ + // OrderBy sets the optional parameter "orderBy": Sorts list results by + // a certain order. By default, results are returned in alphanumerical + // order based on the resource name. You can also sort results in +@@ -167520,6 +169741,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp + // "minimum": "0", + // "type": "integer" + // }, ++ // "natName": { ++ // "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", ++ // "location": "query", ++ // "type": "string" ++ // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", +diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json +index 2569981131b..0f33192ba35 100644 +--- a/vendor/google.golang.org/api/container/v1/container-api.json ++++ b/vendor/google.golang.org/api/container/v1/container-api.json +@@ -197,6 +197,31 @@ + "resources": { + "clusters": { + "methods": { ++ "checkAutopilotCompatibility": { ++ "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", ++ "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", ++ "httpMethod": "GET", ++ "id": "container.projects.locations.clusters.checkAutopilotCompatibility", ++ "parameterOrder": [ ++ "name" ++ ], ++ "parameters": { ++ "name": { ++ "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", ++ "location": "path", ++ "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", ++ "required": true, ++ "type": "string" ++ } ++ }, ++ "path": "v1/{+name}:checkAutopilotCompatibility", ++ "response": { ++ "$ref": "CheckAutopilotCompatibilityResponse" ++ }, ++ "scopes": [ ++ "https://www.googleapis.com/auth/cloud-platform" ++ ] ++ }, + "completeIpRotation": { + "description": "Completes master IP rotation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation", +@@ -447,6 +472,7 @@ + ] + }, + "setLocations": { ++ "deprecated": true, + "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + "httpMethod": "POST", +@@ -1517,6 +1543,7 @@ + ] + }, + "locations": { ++ "deprecated": true, + "description": "Sets the locations for a specific cluster. Deprecated. 
Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + "httpMethod": "POST", +@@ -2487,7 +2514,7 @@ + } + } + }, +- "revision": "20230222", ++ "revision": "20230519", + "rootUrl": "https://container.googleapis.com/", + "schemas": { + "AcceleratorConfig": { +@@ -2514,6 +2541,20 @@ + }, + "type": "object" + }, ++ "AdditionalPodRangesConfig": { ++ "description": "AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message.", ++ "id": "AdditionalPodRangesConfig", ++ "properties": { ++ "podRangeNames": { ++ "description": "Name for pod secondary ipv4 range which has the actual range defined ahead.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "AddonsConfig": { + "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", + "id": "AddonsConfig", +@@ -2614,6 +2655,53 @@ + }, + "type": "object" + }, ++ "AutopilotCompatibilityIssue": { ++ "description": "AutopilotCompatibilityIssue contains information about a specific compatibility issue with Autopilot mode.", ++ "id": "AutopilotCompatibilityIssue", ++ "properties": { ++ "constraintType": { ++ "description": "The constraint type of the issue.", ++ "type": "string" ++ }, ++ "description": { ++ "description": "The description of the issue.", ++ "type": "string" ++ }, ++ "documentationUrl": { ++ "description": "A URL to a public documnetation, which addresses resolving this issue.", ++ "type": "string" ++ }, ++ "incompatibilityType": { ++ "description": "The incompatibility type of this issue.", ++ "enum": [ ++ "UNSPECIFIED", ++ "INCOMPATIBILITY", ++ "ADDITIONAL_CONFIG_REQUIRED", ++ "PASSED_WITH_OPTIONAL_CONFIG" ++ ], ++ "enumDescriptions": [ ++ "Default value, should not be used.", ++ "Indicates that the issue is a known incompatibility between the cluster and Autopilot mode.", ++ "Indicates the issue is an incompatibility if customers take no further action to resolve.", ++ "Indicates the issue is not an incompatibility, but depending on the workloads business logic, there is a potential that they won't work on Autopilot." 
++ ], ++ "type": "string" ++ }, ++ "lastObservation": { ++ "description": "The last time when this issue was observed.", ++ "format": "google-datetime", ++ "type": "string" ++ }, ++ "subjects": { ++ "description": "The name of the resources which are subject to this issue.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "AutoprovisioningNodePoolDefaults": { + "description": "AutoprovisioningNodePoolDefaults contains defaults for a node pool created by NAP.", + "id": "AutoprovisioningNodePoolDefaults", +@@ -2665,6 +2753,22 @@ + }, + "type": "object" + }, ++ "BestEffortProvisioning": { ++ "description": "Best effort provisioning.", ++ "id": "BestEffortProvisioning", ++ "properties": { ++ "enabled": { ++ "description": "When this is enabled, cluster/node pool creations will ignore non-fatal errors like stockout to best provision as many nodes as possible right now and eventually bring up all target number of nodes", ++ "type": "boolean" ++ }, ++ "minProvisionNodes": { ++ "description": "Minimum number of nodes to be provisioned to be considered as succeeded, and the rest of nodes will be provisioned gradually and eventually when stockout issue has been resolved.", ++ "format": "int32", ++ "type": "integer" ++ } ++ }, ++ "type": "object" ++ }, + "BigQueryDestination": { + "description": "Parameters for using BigQuery as the destination of resource usage export.", + "id": "BigQueryDestination", +@@ -2793,6 +2897,24 @@ + }, + "type": "object" + }, ++ "CheckAutopilotCompatibilityResponse": { ++ "description": "CheckAutopilotCompatibilityResponse has a list of compatibility issues.", ++ "id": "CheckAutopilotCompatibilityResponse", ++ "properties": { ++ "issues": { ++ "description": "The list of issues for the given operation.", ++ "items": { ++ "$ref": "AutopilotCompatibilityIssue" ++ }, ++ "type": "array" ++ }, ++ "summary": { ++ "description": "The summary of the autopilot compatibility response.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "CidrBlock": { + "description": "CidrBlock contains an optional name and one CIDR block.", + "id": "CidrBlock", +@@ -2916,6 +3038,10 @@ + "description": "An optional description of this cluster.", + "type": "string" + }, ++ "enableK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Beta APIs Config" ++ }, + "enableKubernetesAlpha": { + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.", + "type": "boolean" +@@ -2936,6 +3062,10 @@ + "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, ++ "fleet": { ++ "$ref": "Fleet", ++ "description": "Fleet information for the cluster." ++ }, + "id": { + "description": "Output only. Unique id for the cluster.", + "readOnly": true, +@@ -3066,7 +3196,7 @@ + }, + "releaseChannel": { + "$ref": "ReleaseChannel", +- "description": "Release channel configuration." ++ "description": "Release channel configuration. If left unspecified on cluster creation and a version is specified, the cluster is enrolled in the most mature release channel where the version is available (first checking STABLE, then REGULAR, and finally RAPID). 
Otherwise, if no release channel configuration and no version is specified, the cluster is enrolled in the REGULAR channel with its default version." + }, + "resourceLabels": { + "additionalProperties": { +@@ -3187,6 +3317,10 @@ + "description": "ClusterUpdate describes an update to the cluster. Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", + "id": "ClusterUpdate", + "properties": { ++ "additionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "The additional pod ranges to be added to the cluster. These pod ranges can be used by node pools to allocate pod IPs." ++ }, + "desiredAddonsConfig": { + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." +@@ -3233,10 +3367,18 @@ + "$ref": "DNSConfig", + "description": "DNSConfig contains clusterDNS config for this cluster." + }, ++ "desiredEnableFqdnNetworkPolicy": { ++ "description": "Enable/Disable FQDN Network Policy for the cluster.", ++ "type": "boolean" ++ }, + "desiredEnablePrivateEndpoint": { + "description": "Enable/Disable private endpoint for the cluster's master.", + "type": "boolean" + }, ++ "desiredFleet": { ++ "$ref": "Fleet", ++ "description": "The desired fleet configuration for the cluster." ++ }, + "desiredGatewayApiConfig": { + "$ref": "GatewayAPIConfig", + "description": "The desired config of Gateway API on this cluster." +@@ -3257,6 +3399,10 @@ + "$ref": "IntraNodeVisibilityConfig", + "description": "The desired config of Intra-node visibility." + }, ++ "desiredK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Desired Beta APIs to be enabled for cluster." ++ }, + "desiredL4ilbSubsettingConfig": { + "$ref": "ILBSubsettingConfig", + "description": "The desired L4 Internal Load Balancer Subsetting configuration." +@@ -3378,9 +3524,17 @@ + "$ref": "WorkloadIdentityConfig", + "description": "Configuration for Workload Identity." + }, ++ "enableK8sBetaApis": { ++ "$ref": "K8sBetaAPIConfig", ++ "description": "Kubernetes open source beta apis enabled on the cluster. Only beta apis" ++ }, + "etag": { + "description": "The current etag of the cluster. If an etag is provided and does not match the current etag of the cluster, update will be blocked and an ABORTED error will be returned.", + "type": "string" ++ }, ++ "removedAdditionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." 
+ } + }, + "type": "object" +@@ -3571,7 +3725,7 @@ + "type": "string" + }, + "state": { +- "description": "Denotes the state of etcd encryption.", ++ "description": "The desired state of etcd encryption.", + "enum": [ + "UNKNOWN", + "ENCRYPTED", +@@ -3664,6 +3818,25 @@ + }, + "type": "object" + }, ++ "Fleet": { ++ "description": "Fleet is the fleet configuration for the cluster.", ++ "id": "Fleet", ++ "properties": { ++ "membership": { ++ "description": "[Output only] The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`.", ++ "type": "string" ++ }, ++ "preRegistered": { ++ "description": "[Output only] Whether the cluster has been registered through the fleet API.", ++ "type": "boolean" ++ }, ++ "project": { ++ "description": "The Fleet host project(project ID or project number) where this cluster will be registered to. This field cannot be changed after the cluster has been registered.", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "GPUSharingConfig": { + "description": "GPUSharingConfig represents the GPU sharing configuration for Hardware Accelerators.", + "id": "GPUSharingConfig", +@@ -3884,6 +4057,11 @@ + "description": "Configuration for controlling how IPs are allocated in the cluster.", + "id": "IPAllocationPolicy", + "properties": { ++ "additionalPodRangesConfig": { ++ "$ref": "AdditionalPodRangesConfig", ++ "description": "Output only. [Output only] The additional pod ranges that are added to the cluster. These pod ranges can be used by new node pools to allocate pod IPs automatically. Once the range is removed it will not show up in IPAllocationPolicy.", ++ "readOnly": true ++ }, + "clusterIpv4Cidr": { + "description": "This field is deprecated, use cluster_ipv4_cidr_block.", + "type": "string" +@@ -3922,6 +4100,10 @@ + "description": "The IP address range of the instance IPs in this cluster. This is applicable only if `create_subnetwork` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", + "type": "string" + }, ++ "podCidrOverprovisionConfig": { ++ "$ref": "PodCIDROverprovisionConfig", ++ "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the cluster. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is doubled and then rounded off to next power of 2 to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." 
++ }, + "servicesIpv4Cidr": { + "description": "This field is deprecated, use services_ipv4_cidr_block.", + "type": "string" +@@ -4042,6 +4224,20 @@ + }, + "type": "object" + }, ++ "K8sBetaAPIConfig": { ++ "description": "K8sBetaAPIConfig , configuration for beta APIs", ++ "id": "K8sBetaAPIConfig", ++ "properties": { ++ "enabledApis": { ++ "description": "Enabled k8s beta APIs.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "KubernetesDashboard": { + "description": "Configuration for the Kubernetes Dashboard.", + "id": "KubernetesDashboard", +@@ -4479,6 +4675,10 @@ + "$ref": "DNSConfig", + "description": "DNSConfig contains clusterDNS config for this cluster." + }, ++ "enableFqdnNetworkPolicy": { ++ "description": "Whether FQDN Network Policy is enabled on this cluster.", ++ "type": "boolean" ++ }, + "enableIntraNodeVisibility": { + "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", + "type": "boolean" +@@ -4589,6 +4789,38 @@ + }, + "type": "object" + }, ++ "NodeAffinity": { ++ "description": "Specifies the NodeAffinity key, values, and affinity operator according to [shared sole tenant node group affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity).", ++ "id": "NodeAffinity", ++ "properties": { ++ "key": { ++ "description": "Key for NodeAffinity.", ++ "type": "string" ++ }, ++ "operator": { ++ "description": "Operator for NodeAffinity.", ++ "enum": [ ++ "OPERATOR_UNSPECIFIED", ++ "IN", ++ "NOT_IN" ++ ], ++ "enumDescriptions": [ ++ "Invalid or unspecified affinity operator.", ++ "Affinity operator.", ++ "Anti-affinity operator." ++ ], ++ "type": "string" ++ }, ++ "values": { ++ "description": "Values for NodeAffinity.", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "NodeConfig": { + "description": "Parameters that describe the nodes in a cluster. GKE Autopilot clusters do not recognize parameters in `NodeConfig`. Use AutoprovisioningNodePoolDefaults instead.", + "id": "NodeConfig", +@@ -4722,6 +4954,10 @@ + "$ref": "ShieldedInstanceConfig", + "description": "Shielded Instance options." + }, ++ "soleTenantConfig": { ++ "$ref": "SoleTenantConfig", ++ "description": "Parameters for node pools to be backed by shared sole tenant node groups." ++ }, + "spot": { + "description": "Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.", + "type": "boolean" +@@ -4839,6 +5075,10 @@ + "$ref": "NetworkPerformanceConfig", + "description": "Network bandwidth tier configuration." + }, ++ "podCidrOverprovisionConfig": { ++ "$ref": "PodCIDROverprovisionConfig", ++ "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the nodepool. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is rounded off to next power of 2 and we then double that to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." ++ }, + "podIpv4CidrBlock": { + "description": "The IP address range for pod IPs in this node pool. Only applicable if `create_pod_range` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. 
`/14`) to have a range chosen with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) to pick a specific range to use. Only applicable if `ip_allocation_policy.use_ip_aliases` is true. This field cannot be changed after the node pool has been created.", + "type": "string" +@@ -4858,6 +5098,10 @@ + "$ref": "NodePoolAutoscaling", + "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present." + }, ++ "bestEffortProvisioning": { ++ "$ref": "BestEffortProvisioning", ++ "description": "Enable best effort provisioning for nodes" ++ }, + "conditions": { + "description": "Which conditions caused the current node pool state.", + "items": { +@@ -5158,26 +5402,28 @@ + "SET_MASTER_AUTH", + "SET_NODE_POOL_SIZE", + "SET_NETWORK_POLICY", +- "SET_MAINTENANCE_POLICY" ++ "SET_MAINTENANCE_POLICY", ++ "RESIZE_CLUSTER" + ], + "enumDescriptions": [ + "Not set.", +- "Cluster create.", +- "Cluster delete.", +- "A master upgrade.", +- "A node upgrade.", +- "Cluster repair.", +- "Cluster update.", +- "Node pool create.", +- "Node pool delete.", +- "Set node pool management.", +- "Automatic node pool repair.", +- "Automatic node upgrade.", +- "Set labels.", +- "Set/generate master auth materials", +- "Set node pool size.", +- "Updates network policy for a cluster.", +- "Set the maintenance policy." ++ "The cluster is being created. The cluster should be assumed to be unusable until the operation finishes. In the event of the operation failing, the cluster will enter the ERROR state and eventually be deleted.", ++ "The cluster is being deleted. The cluster should be assumed to be unusable as soon as this operation starts. In the event of the operation failing, the cluster will enter the ERROR state and the deletion will be automatically retried until completed.", ++ "The cluster version is being updated. Note that this includes \"upgrades\" to the same version, which are simply a recreation. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#upgrading_automatically). For more details, see [documentation on cluster upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#cluster_upgrades).", ++ "A node pool is being updated. Despite calling this an \"upgrade\", this includes most forms of updates to node pools. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades). This operation sets the progress field and may be canceled. The upgrade strategy depends on [node pool configuration](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pool-upgrade-strategies). The nodes are generally still usable during this operation.", ++ "A problem has been detected with the control plane and is being repaired. This operation type is initiated by GKE. For more details, see [documentation on repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs).", ++ "The cluster is being updated. This is a broad category of operations and includes operations that only change metadata as well as those that must recreate the entire cluster. If the control plane must be recreated, this will cause temporary downtime for zonal clusters. Some features require recreating the nodes as well. 
Those will be recreated as separate operations and the update may not be completely functional until the node pools recreations finish. Node recreations will generally follow [maintenance policies](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions). Some GKE-initiated operations use this type. This includes certain types of auto-upgrades and incident mitigations.", ++ "A node pool is being created. The node pool should be assumed to be unusable until this operation finishes. In the event of an error, the node pool may be partially created. If enabled, [node autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning) may have automatically initiated such operations.", ++ "The node pool is being deleted. The node pool should be assumed to be unusable as soon as this operation starts.", ++ "The node pool's manamagent field is being updated. These operations only update metadata and may be concurrent with most other operations.", ++ "A problem has been detected with nodes and [they are being repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair). This operation type is initiated by GKE, typically automatically. This operation may be concurrent with other operations and there may be multiple repairs occurring on the same node pool.", ++ "Unused. Automatic node upgrade uses UPGRADE_NODES.", ++ "Unused. Updating labels uses UPDATE_CLUSTER.", ++ "Unused. Updating master auth uses UPDATE_CLUSTER.", ++ "The node pool is being resized. With the exception of resizing to or from size zero, the node pool is generally usable during this operation.", ++ "Unused. Updating network policy uses UPDATE_CLUSTER.", ++ "Unused. Updating maintenance policy uses UPDATE_CLUSTER.", ++ "The control plane is being resized. This operation type is initiated by GKE. These operations are often performed preemptively to ensure that the control plane has sufficient resources and is not typically an indication of issues. For more details, see [documentation on resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs)." + ], + "type": "string" + }, +@@ -5187,7 +5433,7 @@ + "readOnly": true + }, + "selfLink": { +- "description": "Server-defined URL for the resource.", ++ "description": "Server-defined URI for the operation. Example: `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1/operations/operation-123`.", + "type": "string" + }, + "startTime": { +@@ -5218,7 +5464,7 @@ + "type": "string" + }, + "targetLink": { +- "description": "Server-defined URL for the target of the operation.", ++ "description": "Server-defined URI for the target of the operation. The format of this is a URI to the resource being modified (such as a cluster, node pool, or node). For node pool repairs, there may be multiple nodes being repaired, but only one will be the target. 
Examples: - ## `https://container.googleapis.com/v1/projects/123/locations/us-central1/clusters/my-cluster` ## `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np` `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np/node/my-node`", + "type": "string" + }, + "zone": { +@@ -5290,6 +5536,17 @@ + }, + "type": "object" + }, ++ "PodCIDROverprovisionConfig": { ++ "description": "[PRIVATE FIELD] Config for pod CIDR size overprovisioning.", ++ "id": "PodCIDROverprovisionConfig", ++ "properties": { ++ "disable": { ++ "description": "Whether Pod CIDR overprovisioning is disabled. Note: Pod CIDR overprovisioning is enabled by default.", ++ "type": "boolean" ++ } ++ }, ++ "type": "object" ++ }, + "PrivateClusterConfig": { + "description": "Configuration options for private clusters.", + "id": "PrivateClusterConfig", +@@ -6068,6 +6325,20 @@ + }, + "type": "object" + }, ++ "SoleTenantConfig": { ++ "description": "SoleTenantConfig contains the NodeAffinities to specify what shared sole tenant node groups should back the node pool.", ++ "id": "SoleTenantConfig", ++ "properties": { ++ "nodeAffinities": { ++ "description": "NodeAffinities used to match to a shared sole tenant node group.", ++ "items": { ++ "$ref": "NodeAffinity" ++ }, ++ "type": "array" ++ } ++ }, ++ "type": "object" ++ }, + "StandardRolloutPolicy": { + "description": "Standard rollout policy is the default policy for blue-green.", + "id": "StandardRolloutPolicy", +@@ -6575,7 +6846,7 @@ + "enumDescriptions": [ + "UNKNOWN is the zero value of the Status enum. It's not a valid status.", + "UNUSED denotes that this range is unclaimed by any cluster.", +- "IN_USE_SERVICE denotes that this range is claimed by a cluster for services. It cannot be used for other clusters.", ++ "IN_USE_SERVICE denotes that this range is claimed by cluster(s) for services. User-managed services range can be shared between clusters within the same subnetwork.", + "IN_USE_SHAREABLE_POD denotes this range was created by the network admin and is currently claimed by a cluster for pods. It can only be used by other clusters as a pod range.", + "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed for pods. It cannot be used for other clusters." + ], +diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go +index 675d4a13ba4..0982b905144 100644 +--- a/vendor/google.golang.org/api/container/v1/container-gen.go ++++ b/vendor/google.golang.org/api/container/v1/container-gen.go +@@ -71,6 +71,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "container:v1" + const apiName = "container" +@@ -322,6 +323,37 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// AdditionalPodRangesConfig: AdditionalPodRangesConfig is the ++// configuration for additional pod secondary ranges supporting the ++// ClusterUpdate message. ++type AdditionalPodRangesConfig struct { ++ // PodRangeNames: Name for pod secondary ipv4 range which has the actual ++ // range defined ahead. ++ PodRangeNames []string `json:"podRangeNames,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "PodRangeNames") to ++ // unconditionally include in API requests. 
By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "PodRangeNames") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *AdditionalPodRangesConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod AdditionalPodRangesConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // AddonsConfig: Configuration for the addons that can be automatically + // spun up in the cluster, enabling additional functionality. + type AddonsConfig struct { +@@ -531,6 +563,62 @@ func (s *Autopilot) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// AutopilotCompatibilityIssue: AutopilotCompatibilityIssue contains ++// information about a specific compatibility issue with Autopilot mode. ++type AutopilotCompatibilityIssue struct { ++ // ConstraintType: The constraint type of the issue. ++ ConstraintType string `json:"constraintType,omitempty"` ++ ++ // Description: The description of the issue. ++ Description string `json:"description,omitempty"` ++ ++ // DocumentationUrl: A URL to a public documnetation, which addresses ++ // resolving this issue. ++ DocumentationUrl string `json:"documentationUrl,omitempty"` ++ ++ // IncompatibilityType: The incompatibility type of this issue. ++ // ++ // Possible values: ++ // "UNSPECIFIED" - Default value, should not be used. ++ // "INCOMPATIBILITY" - Indicates that the issue is a known ++ // incompatibility between the cluster and Autopilot mode. ++ // "ADDITIONAL_CONFIG_REQUIRED" - Indicates the issue is an ++ // incompatibility if customers take no further action to resolve. ++ // "PASSED_WITH_OPTIONAL_CONFIG" - Indicates the issue is not an ++ // incompatibility, but depending on the workloads business logic, there ++ // is a potential that they won't work on Autopilot. ++ IncompatibilityType string `json:"incompatibilityType,omitempty"` ++ ++ // LastObservation: The last time when this issue was observed. ++ LastObservation string `json:"lastObservation,omitempty"` ++ ++ // Subjects: The name of the resources which are subject to this issue. ++ Subjects []string `json:"subjects,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "ConstraintType") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "ConstraintType") to ++ // include in API requests with the JSON null value. 
By default, fields ++ // with empty values are omitted from API requests. However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *AutopilotCompatibilityIssue) MarshalJSON() ([]byte, error) { ++ type NoMethod AutopilotCompatibilityIssue ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults + // contains defaults for a node pool created by NAP. + type AutoprovisioningNodePoolDefaults struct { +@@ -613,6 +701,42 @@ func (s *AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// BestEffortProvisioning: Best effort provisioning. ++type BestEffortProvisioning struct { ++ // Enabled: When this is enabled, cluster/node pool creations will ++ // ignore non-fatal errors like stockout to best provision as many nodes ++ // as possible right now and eventually bring up all target number of ++ // nodes ++ Enabled bool `json:"enabled,omitempty"` ++ ++ // MinProvisionNodes: Minimum number of nodes to be provisioned to be ++ // considered as succeeded, and the rest of nodes will be provisioned ++ // gradually and eventually when stockout issue has been resolved. ++ MinProvisionNodes int64 `json:"minProvisionNodes,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Enabled") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Enabled") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *BestEffortProvisioning) MarshalJSON() ([]byte, error) { ++ type NoMethod BestEffortProvisioning ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // BigQueryDestination: Parameters for using BigQuery as the destination + // of resource usage export. + type BigQueryDestination struct { +@@ -826,6 +950,43 @@ func (s *CancelOperationRequest) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// CheckAutopilotCompatibilityResponse: ++// CheckAutopilotCompatibilityResponse has a list of compatibility ++// issues. ++type CheckAutopilotCompatibilityResponse struct { ++ // Issues: The list of issues for the given operation. ++ Issues []*AutopilotCompatibilityIssue `json:"issues,omitempty"` ++ ++ // Summary: The summary of the autopilot compatibility response. ++ Summary string `json:"summary,omitempty"` ++ ++ // ServerResponse contains the HTTP response code and headers from the ++ // server. 
++ googleapi.ServerResponse `json:"-"` ++ ++ // ForceSendFields is a list of field names (e.g. "Issues") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Issues") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *CheckAutopilotCompatibilityResponse) MarshalJSON() ([]byte, error) { ++ type NoMethod CheckAutopilotCompatibilityResponse ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // CidrBlock: CidrBlock contains an optional name and one CIDR block. + type CidrBlock struct { + // CidrBlock: cidr_block must be specified in CIDR notation. +@@ -998,6 +1159,9 @@ type Cluster struct { + // Description: An optional description of this cluster. + Description string `json:"description,omitempty"` + ++ // EnableK8sBetaApis: Beta APIs Config ++ EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` ++ + // EnableKubernetesAlpha: Kubernetes alpha features are enabled on this + // cluster. This includes alpha API groups (e.g. v1alpha1) and features + // that may not be production ready in the kubernetes version of the +@@ -1025,6 +1189,9 @@ type Cluster struct { + // format. + ExpireTime string `json:"expireTime,omitempty"` + ++ // Fleet: Fleet information for the cluster. ++ Fleet *Fleet `json:"fleet,omitempty"` ++ + // Id: Output only. Unique id for the cluster. + Id string `json:"id,omitempty"` + +@@ -1190,7 +1357,12 @@ type Cluster struct { + // PrivateClusterConfig: Configuration for private cluster. + PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` + +- // ReleaseChannel: Release channel configuration. ++ // ReleaseChannel: Release channel configuration. If left unspecified on ++ // cluster creation and a version is specified, the cluster is enrolled ++ // in the most mature release channel where the version is available ++ // (first checking STABLE, then REGULAR, and finally RAPID). Otherwise, ++ // if no release channel configuration and no version is specified, the ++ // cluster is enrolled in the REGULAR channel with its default version. + ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` + + // ResourceLabels: The resource labels for the cluster to use to +@@ -1352,6 +1524,11 @@ func (s *ClusterAutoscaling) MarshalJSON() ([]byte, error) { + // Exactly one update can be applied to a cluster with each request, so + // at most one field can be provided. + type ClusterUpdate struct { ++ // AdditionalPodRangesConfig: The additional pod ranges to be added to ++ // the cluster. These pod ranges can be used by node pools to allocate ++ // pod IPs. 
++ AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` ++ + // DesiredAddonsConfig: Configurations for the various addons available + // to run in the cluster. + DesiredAddonsConfig *AddonsConfig `json:"desiredAddonsConfig,omitempty"` +@@ -1395,10 +1572,17 @@ type ClusterUpdate struct { + // cluster. + DesiredDnsConfig *DNSConfig `json:"desiredDnsConfig,omitempty"` + ++ // DesiredEnableFqdnNetworkPolicy: Enable/Disable FQDN Network Policy ++ // for the cluster. ++ DesiredEnableFqdnNetworkPolicy bool `json:"desiredEnableFqdnNetworkPolicy,omitempty"` ++ + // DesiredEnablePrivateEndpoint: Enable/Disable private endpoint for the + // cluster's master. + DesiredEnablePrivateEndpoint bool `json:"desiredEnablePrivateEndpoint,omitempty"` + ++ // DesiredFleet: The desired fleet configuration for the cluster. ++ DesiredFleet *Fleet `json:"desiredFleet,omitempty"` ++ + // DesiredGatewayApiConfig: The desired config of Gateway API on this + // cluster. + DesiredGatewayApiConfig *GatewayAPIConfig `json:"desiredGatewayApiConfig,omitempty"` +@@ -1418,6 +1602,9 @@ type ClusterUpdate struct { + // visibility. + DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `json:"desiredIntraNodeVisibilityConfig,omitempty"` + ++ // DesiredK8sBetaApis: Desired Beta APIs to be enabled for cluster. ++ DesiredK8sBetaApis *K8sBetaAPIConfig `json:"desiredK8sBetaApis,omitempty"` ++ + // DesiredL4ilbSubsettingConfig: The desired L4 Internal Load Balancer + // Subsetting configuration. + DesiredL4ilbSubsettingConfig *ILBSubsettingConfig `json:"desiredL4ilbSubsettingConfig,omitempty"` +@@ -1560,26 +1747,37 @@ type ClusterUpdate struct { + // DesiredWorkloadIdentityConfig: Configuration for Workload Identity. + DesiredWorkloadIdentityConfig *WorkloadIdentityConfig `json:"desiredWorkloadIdentityConfig,omitempty"` + ++ // EnableK8sBetaApis: Kubernetes open source beta apis enabled on the ++ // cluster. Only beta apis ++ EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` ++ + // Etag: The current etag of the cluster. If an etag is provided and + // does not match the current etag of the cluster, update will be + // blocked and an ABORTED error will be returned. + Etag string `json:"etag,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "DesiredAddonsConfig") +- // to unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // RemovedAdditionalPodRangesConfig: The additional pod ranges that are ++ // to be removed from the cluster. The pod ranges specified here must ++ // have been specified earlier in the 'additional_pod_ranges_config' ++ // argument. ++ RemovedAdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"removedAdditionalPodRangesConfig,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "DesiredAddonsConfig") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. ++ // NullFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to include in API requests with the JSON ++ // null value. By default, fields with empty values are omitted from API ++ // requests. However, any field with an empty value appearing in ++ // NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. + NullFields []string `json:"-"` + } + +@@ -1952,7 +2150,7 @@ type DatabaseEncryption struct { + // y + KeyName string `json:"keyName,omitempty"` + +- // State: Denotes the state of etcd encryption. ++ // State: The desired state of etcd encryption. + // + // Possible values: + // "UNKNOWN" - Should never be set +@@ -2155,6 +2353,45 @@ func (s *Filter) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// Fleet: Fleet is the fleet configuration for the cluster. ++type Fleet struct { ++ // Membership: [Output only] The full resource name of the registered ++ // fleet membership of the cluster, in the format ++ // `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. ++ Membership string `json:"membership,omitempty"` ++ ++ // PreRegistered: [Output only] Whether the cluster has been registered ++ // through the fleet API. ++ PreRegistered bool `json:"preRegistered,omitempty"` ++ ++ // Project: The Fleet host project(project ID or project number) where ++ // this cluster will be registered to. This field cannot be changed ++ // after the cluster has been registered. ++ Project string `json:"project,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Membership") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Membership") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *Fleet) MarshalJSON() ([]byte, error) { ++ type NoMethod Fleet ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // GPUSharingConfig: GPUSharingConfig represents the GPU sharing + // configuration for Hardware Accelerators. 
+ type GPUSharingConfig struct { +@@ -2576,6 +2813,12 @@ func (s *ILBSubsettingConfig) MarshalJSON() ([]byte, error) { + // IPAllocationPolicy: Configuration for controlling how IPs are + // allocated in the cluster. + type IPAllocationPolicy struct { ++ // AdditionalPodRangesConfig: Output only. [Output only] The additional ++ // pod ranges that are added to the cluster. These pod ranges can be ++ // used by new node pools to allocate pod IPs automatically. Once the ++ // range is removed it will not show up in IPAllocationPolicy. ++ AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` ++ + // ClusterIpv4Cidr: This field is deprecated, use + // cluster_ipv4_cidr_block. + ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` +@@ -2630,6 +2873,17 @@ type IPAllocationPolicy struct { + // specific range to use. + NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` + ++ // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size ++ // overprovisioning config for the cluster. Pod CIDR size per node ++ // depends on max_pods_per_node. By default, the value of ++ // max_pods_per_node is doubled and then rounded off to next power of 2 ++ // to get the size of pod CIDR block per node. Example: ++ // max_pods_per_node of 30 would result in 64 IPs (/26). This config can ++ // disable the doubling of IPs (we still round off to next power of 2) ++ // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when ++ // overprovisioning is disabled. ++ PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` ++ + // ServicesIpv4Cidr: This field is deprecated, use + // services_ipv4_cidr_block. + ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` +@@ -2699,21 +2953,22 @@ type IPAllocationPolicy struct { + // false, then the server picks the default IP allocation mode + UseRoutes bool `json:"useRoutes,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "ClusterIpv4Cidr") to +- // unconditionally include in API requests. By default, fields with +- // empty or default values are omitted from API requests. However, any +- // non-pointer, non-interface field appearing in ForceSendFields will be +- // sent to the server regardless of whether the field is empty or not. +- // This may be used to include empty fields in Patch requests. ++ // ForceSendFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "ClusterIpv4Cidr") to +- // include in API requests with the JSON null value. By default, fields +- // with empty values are omitted from API requests. However, any field +- // with an empty value appearing in NullFields will be sent to the +- // server as null. It is an error if a field in this list has a +- // non-empty value. This may be used to include null fields in Patch +- // requests. ++ // NullFields is a list of field names (e.g. ++ // "AdditionalPodRangesConfig") to include in API requests with the JSON ++ // null value. By default, fields with empty values are omitted from API ++ // requests. 
However, any field with an empty value appearing in ++ // NullFields will be sent to the server as null. It is an error if a ++ // field in this list has a non-empty value. This may be used to include ++ // null fields in Patch requests. + NullFields []string `json:"-"` + } + +@@ -2834,6 +3089,34 @@ func (s *Jwk) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// K8sBetaAPIConfig: K8sBetaAPIConfig , configuration for beta APIs ++type K8sBetaAPIConfig struct { ++ // EnabledApis: Enabled k8s beta APIs. ++ EnabledApis []string `json:"enabledApis,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "EnabledApis") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "EnabledApis") to include ++ // in API requests with the JSON null value. By default, fields with ++ // empty values are omitted from API requests. However, any field with ++ // an empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *K8sBetaAPIConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod K8sBetaAPIConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. + type KubernetesDashboard struct { + // Disabled: Whether the Kubernetes Dashboard is enabled for this +@@ -3696,6 +3979,10 @@ type NetworkConfig struct { + // DnsConfig: DNSConfig contains clusterDNS config for this cluster. + DnsConfig *DNSConfig `json:"dnsConfig,omitempty"` + ++ // EnableFqdnNetworkPolicy: Whether FQDN Network Policy is enabled on ++ // this cluster. ++ EnableFqdnNetworkPolicy bool `json:"enableFqdnNetworkPolicy,omitempty"` ++ + // EnableIntraNodeVisibility: Whether Intra-node visibility is enabled + // for this cluster. This makes same node pod to pod traffic visible for + // VPC network. +@@ -3895,6 +4182,47 @@ func (s *NetworkTags) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NodeAffinity: Specifies the NodeAffinity key, values, and affinity ++// operator according to shared sole tenant node group affinities ++// (https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity). ++type NodeAffinity struct { ++ // Key: Key for NodeAffinity. ++ Key string `json:"key,omitempty"` ++ ++ // Operator: Operator for NodeAffinity. ++ // ++ // Possible values: ++ // "OPERATOR_UNSPECIFIED" - Invalid or unspecified affinity operator. ++ // "IN" - Affinity operator. ++ // "NOT_IN" - Anti-affinity operator. ++ Operator string `json:"operator,omitempty"` ++ ++ // Values: Values for NodeAffinity. ++ Values []string `json:"values,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Key") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. 
However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Key") to include in API ++ // requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NodeAffinity) MarshalJSON() ([]byte, error) { ++ type NoMethod NodeAffinity ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NodeConfig: Parameters that describe the nodes in a cluster. GKE + // Autopilot clusters do not recognize parameters in `NodeConfig`. Use + // AutoprovisioningNodePoolDefaults instead. +@@ -4057,6 +4385,10 @@ type NodeConfig struct { + // ShieldedInstanceConfig: Shielded Instance options. + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + ++ // SoleTenantConfig: Parameters for node pools to be backed by shared ++ // sole tenant node groups. ++ SoleTenantConfig *SoleTenantConfig `json:"soleTenantConfig,omitempty"` ++ + // Spot: Spot flag for enabling Spot VM, which is a rebrand of the + // existing preemptible flag. + Spot bool `json:"spot,omitempty"` +@@ -4282,6 +4614,17 @@ type NodeNetworkConfig struct { + // NetworkPerformanceConfig: Network bandwidth tier configuration. + NetworkPerformanceConfig *NetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` + ++ // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size ++ // overprovisioning config for the nodepool. Pod CIDR size per node ++ // depends on max_pods_per_node. By default, the value of ++ // max_pods_per_node is rounded off to next power of 2 and we then ++ // double that to get the size of pod CIDR block per node. Example: ++ // max_pods_per_node of 30 would result in 64 IPs (/26). This config can ++ // disable the doubling of IPs (we still round off to next power of 2) ++ // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when ++ // overprovisioning is disabled. ++ PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` ++ + // PodIpv4CidrBlock: The IP address range for pod IPs in this node pool. + // Only applicable if `create_pod_range` is true. Set to blank to have a + // range chosen with the default size. Set to /netmask (e.g. `/14`) to +@@ -4335,6 +4678,9 @@ type NodePool struct { + // is enabled only if a valid configuration is present. + Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` + ++ // BestEffortProvisioning: Enable best effort provisioning for nodes ++ BestEffortProvisioning *BestEffortProvisioning `json:"bestEffortProvisioning,omitempty"` ++ + // Conditions: Which conditions caused the current node pool state. + Conditions []*StatusCondition `json:"conditions,omitempty"` + +@@ -4752,29 +5098,95 @@ type Operation struct { + // + // Possible values: + // "TYPE_UNSPECIFIED" - Not set. +- // "CREATE_CLUSTER" - Cluster create. +- // "DELETE_CLUSTER" - Cluster delete. +- // "UPGRADE_MASTER" - A master upgrade. +- // "UPGRADE_NODES" - A node upgrade. 
+- // "REPAIR_CLUSTER" - Cluster repair. +- // "UPDATE_CLUSTER" - Cluster update. +- // "CREATE_NODE_POOL" - Node pool create. +- // "DELETE_NODE_POOL" - Node pool delete. +- // "SET_NODE_POOL_MANAGEMENT" - Set node pool management. +- // "AUTO_REPAIR_NODES" - Automatic node pool repair. +- // "AUTO_UPGRADE_NODES" - Automatic node upgrade. +- // "SET_LABELS" - Set labels. +- // "SET_MASTER_AUTH" - Set/generate master auth materials +- // "SET_NODE_POOL_SIZE" - Set node pool size. +- // "SET_NETWORK_POLICY" - Updates network policy for a cluster. +- // "SET_MAINTENANCE_POLICY" - Set the maintenance policy. ++ // "CREATE_CLUSTER" - The cluster is being created. The cluster should ++ // be assumed to be unusable until the operation finishes. In the event ++ // of the operation failing, the cluster will enter the ERROR state and ++ // eventually be deleted. ++ // "DELETE_CLUSTER" - The cluster is being deleted. The cluster should ++ // be assumed to be unusable as soon as this operation starts. In the ++ // event of the operation failing, the cluster will enter the ERROR ++ // state and the deletion will be automatically retried until completed. ++ // "UPGRADE_MASTER" - The cluster version is being updated. Note that ++ // this includes "upgrades" to the same version, which are simply a ++ // recreation. This also includes ++ // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concep ++ // ts/cluster-upgrades#upgrading_automatically). For more details, see ++ // [documentation on cluster ++ // upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/clu ++ // ster-upgrades#cluster_upgrades). ++ // "UPGRADE_NODES" - A node pool is being updated. Despite calling ++ // this an "upgrade", this includes most forms of updates to node pools. ++ // This also includes ++ // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to ++ // /node-auto-upgrades). This operation sets the progress field and may ++ // be canceled. The upgrade strategy depends on [node pool ++ // configuration](https://cloud.google.com/kubernetes-engine/docs/concept ++ // s/node-pool-upgrade-strategies). The nodes are generally still usable ++ // during this operation. ++ // "REPAIR_CLUSTER" - A problem has been detected with the control ++ // plane and is being repaired. This operation type is initiated by GKE. ++ // For more details, see [documentation on ++ // repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/main ++ // tenance-windows-and-exclusions#repairs). ++ // "UPDATE_CLUSTER" - The cluster is being updated. This is a broad ++ // category of operations and includes operations that only change ++ // metadata as well as those that must recreate the entire cluster. If ++ // the control plane must be recreated, this will cause temporary ++ // downtime for zonal clusters. Some features require recreating the ++ // nodes as well. Those will be recreated as separate operations and the ++ // update may not be completely functional until the node pools ++ // recreations finish. Node recreations will generally follow ++ // [maintenance ++ // policies](https://cloud.google.com/kubernetes-engine/docs/concepts/mai ++ // ntenance-windows-and-exclusions). Some GKE-initiated operations use ++ // this type. This includes certain types of auto-upgrades and incident ++ // mitigations. ++ // "CREATE_NODE_POOL" - A node pool is being created. The node pool ++ // should be assumed to be unusable until this operation finishes. 
In ++ // the event of an error, the node pool may be partially created. If ++ // enabled, [node ++ // autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how- ++ // to/node-auto-provisioning) may have automatically initiated such ++ // operations. ++ // "DELETE_NODE_POOL" - The node pool is being deleted. The node pool ++ // should be assumed to be unusable as soon as this operation starts. ++ // "SET_NODE_POOL_MANAGEMENT" - The node pool's manamagent field is ++ // being updated. These operations only update metadata and may be ++ // concurrent with most other operations. ++ // "AUTO_REPAIR_NODES" - A problem has been detected with nodes and ++ // [they are being ++ // repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node- ++ // auto-repair). This operation type is initiated by GKE, typically ++ // automatically. This operation may be concurrent with other operations ++ // and there may be multiple repairs occurring on the same node pool. ++ // "AUTO_UPGRADE_NODES" - Unused. Automatic node upgrade uses ++ // UPGRADE_NODES. ++ // "SET_LABELS" - Unused. Updating labels uses UPDATE_CLUSTER. ++ // "SET_MASTER_AUTH" - Unused. Updating master auth uses ++ // UPDATE_CLUSTER. ++ // "SET_NODE_POOL_SIZE" - The node pool is being resized. With the ++ // exception of resizing to or from size zero, the node pool is ++ // generally usable during this operation. ++ // "SET_NETWORK_POLICY" - Unused. Updating network policy uses ++ // UPDATE_CLUSTER. ++ // "SET_MAINTENANCE_POLICY" - Unused. Updating maintenance policy uses ++ // UPDATE_CLUSTER. ++ // "RESIZE_CLUSTER" - The control plane is being resized. This ++ // operation type is initiated by GKE. These operations are often ++ // performed preemptively to ensure that the control plane has ++ // sufficient resources and is not typically an indication of issues. ++ // For more details, see [documentation on ++ // resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/main ++ // tenance-windows-and-exclusions#repairs). + OperationType string `json:"operationType,omitempty"` + + // Progress: Output only. [Output only] Progress information for an + // operation. + Progress *OperationProgress `json:"progress,omitempty"` + +- // SelfLink: Server-defined URL for the resource. ++ // SelfLink: Server-defined URI for the operation. Example: ++ // `https://container.googleapis.com/v1alpha1/projects/123/locations/us-c ++ // entral1/operations/operation-123`. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output only] The time the operation started, in RFC3339 +@@ -4795,7 +5207,17 @@ type Operation struct { + // description of the error. Deprecated. Use the field error instead. + StatusMessage string `json:"statusMessage,omitempty"` + +- // TargetLink: Server-defined URL for the target of the operation. ++ // TargetLink: Server-defined URI for the target of the operation. The ++ // format of this is a URI to the resource being modified (such as a ++ // cluster, node pool, or node). For node pool repairs, there may be ++ // multiple nodes being repaired, but only one will be the target. 
++ // Examples: - ## ++ // `https://container.googleapis.com/v1/projects/123/locations/us-central ++ // 1/clusters/my-cluster` ## ++ // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ ++ // clusters/my-cluster/nodePools/my-np` ++ // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ ++ // clusters/my-cluster/nodePools/my-np/node/my-node` + TargetLink string `json:"targetLink,omitempty"` + + // Zone: The name of the Google Compute Engine zone +@@ -4917,6 +5339,36 @@ func (s *PlacementPolicy) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// PodCIDROverprovisionConfig: [PRIVATE FIELD] Config for pod CIDR size ++// overprovisioning. ++type PodCIDROverprovisionConfig struct { ++ // Disable: Whether Pod CIDR overprovisioning is disabled. Note: Pod ++ // CIDR overprovisioning is enabled by default. ++ Disable bool `json:"disable,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "Disable") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "Disable") to include in ++ // API requests with the JSON null value. By default, fields with empty ++ // values are omitted from API requests. However, any field with an ++ // empty value appearing in NullFields will be sent to the server as ++ // null. It is an error if a field in this list has a non-empty value. ++ // This may be used to include null fields in Patch requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *PodCIDROverprovisionConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod PodCIDROverprovisionConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // PrivateClusterConfig: Configuration options for private clusters. + type PrivateClusterConfig struct { + // EnablePrivateEndpoint: Whether the master's internal IP address is +@@ -6306,6 +6758,38 @@ func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// SoleTenantConfig: SoleTenantConfig contains the NodeAffinities to ++// specify what shared sole tenant node groups should back the node ++// pool. ++type SoleTenantConfig struct { ++ // NodeAffinities: NodeAffinities used to match to a shared sole tenant ++ // node group. ++ NodeAffinities []*NodeAffinity `json:"nodeAffinities,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. "NodeAffinities") to ++ // unconditionally include in API requests. By default, fields with ++ // empty or default values are omitted from API requests. However, any ++ // non-pointer, non-interface field appearing in ForceSendFields will be ++ // sent to the server regardless of whether the field is empty or not. ++ // This may be used to include empty fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NodeAffinities") to ++ // include in API requests with the JSON null value. By default, fields ++ // with empty values are omitted from API requests. 
However, any field ++ // with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *SoleTenantConfig) MarshalJSON() ([]byte, error) { ++ type NoMethod SoleTenantConfig ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // StandardRolloutPolicy: Standard rollout policy is the default policy + // for blue-green. + type StandardRolloutPolicy struct { +@@ -7153,8 +7637,8 @@ type UsableSubnetworkSecondaryRange struct { + // "UNUSED" - UNUSED denotes that this range is unclaimed by any + // cluster. + // "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is +- // claimed by a cluster for services. It cannot be used for other +- // clusters. ++ // claimed by cluster(s) for services. User-managed services range can ++ // be shared between clusters within the same subnetwork. + // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range + // was created by the network admin and is currently claimed by a + // cluster for pods. It can only be used by other clusters as a pod +@@ -7742,6 +8226,156 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) + + } + ++// method id "container.projects.locations.clusters.checkAutopilotCompatibility": ++ ++type ProjectsLocationsClustersCheckAutopilotCompatibilityCall struct { ++ s *Service ++ name string ++ urlParams_ gensupport.URLParams ++ ifNoneMatch_ string ++ ctx_ context.Context ++ header_ http.Header ++} ++ ++// CheckAutopilotCompatibility: Checks the cluster compatibility with ++// Autopilot mode, and returns a list of compatibility issues. ++// ++// - name: The name (project, location, cluster) of the cluster to ++// retrieve. Specified in the format ++// `projects/*/locations/*/clusters/*`. ++func (r *ProjectsLocationsClustersService) CheckAutopilotCompatibility(name string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c := &ProjectsLocationsClustersCheckAutopilotCompatibilityCall{s: r.s, urlParams_: make(gensupport.URLParams)} ++ c.name = name ++ return c ++} ++ ++// Fields allows partial responses to be retrieved. See ++// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse ++// for more information. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.urlParams_.Set("fields", googleapi.CombineFields(s)) ++ return c ++} ++ ++// IfNoneMatch sets the optional parameter which makes the operation ++// fail if the object's ETag matches the given value. This is useful for ++// getting updates only after the object has changed since the last ++// request. Use googleapi.IsNotModified to check whether the response ++// error from Do is the result of In-None-Match. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.ifNoneMatch_ = entityTag ++ return c ++} ++ ++// Context sets the context to be used in this call's Do method. Any ++// pending HTTP request will be aborted if the provided context is ++// canceled. 
++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Context(ctx context.Context) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { ++ c.ctx_ = ctx ++ return c ++} ++ ++// Header returns an http.Header that can be modified by the caller to ++// add HTTP headers to the request. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Header() http.Header { ++ if c.header_ == nil { ++ c.header_ = make(http.Header) ++ } ++ return c.header_ ++} ++ ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) doRequest(alt string) (*http.Response, error) { ++ reqHeaders := make(http.Header) ++ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) ++ for k, v := range c.header_ { ++ reqHeaders[k] = v ++ } ++ reqHeaders.Set("User-Agent", c.s.userAgent()) ++ if c.ifNoneMatch_ != "" { ++ reqHeaders.Set("If-None-Match", c.ifNoneMatch_) ++ } ++ var body io.Reader = nil ++ c.urlParams_.Set("alt", alt) ++ c.urlParams_.Set("prettyPrint", "false") ++ urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:checkAutopilotCompatibility") ++ urls += "?" + c.urlParams_.Encode() ++ req, err := http.NewRequest("GET", urls, body) ++ if err != nil { ++ return nil, err ++ } ++ req.Header = reqHeaders ++ googleapi.Expand(req.URL, map[string]string{ ++ "name": c.name, ++ }) ++ return gensupport.SendRequest(c.ctx_, c.s.client, req) ++} ++ ++// Do executes the "container.projects.locations.clusters.checkAutopilotCompatibility" call. ++// Exactly one of *CheckAutopilotCompatibilityResponse or error will be ++// non-nil. Any non-2xx status code is an error. Response headers are in ++// either *CheckAutopilotCompatibilityResponse.ServerResponse.Header or ++// (if a response was returned at all) in ++// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check ++// whether the returned error was because http.StatusNotModified was ++// returned. ++func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Do(opts ...googleapi.CallOption) (*CheckAutopilotCompatibilityResponse, error) { ++ gensupport.SetOptions(c.urlParams_, opts...) ++ res, err := c.doRequest("json") ++ if res != nil && res.StatusCode == http.StatusNotModified { ++ if res.Body != nil { ++ res.Body.Close() ++ } ++ return nil, gensupport.WrapError(&googleapi.Error{ ++ Code: res.StatusCode, ++ Header: res.Header, ++ }) ++ } ++ if err != nil { ++ return nil, err ++ } ++ defer googleapi.CloseBody(res) ++ if err := googleapi.CheckResponse(res); err != nil { ++ return nil, gensupport.WrapError(err) ++ } ++ ret := &CheckAutopilotCompatibilityResponse{ ++ ServerResponse: googleapi.ServerResponse{ ++ Header: res.Header, ++ HTTPStatusCode: res.StatusCode, ++ }, ++ } ++ target := &ret ++ if err := gensupport.DecodeResponse(target, res); err != nil { ++ return nil, err ++ } ++ return ret, nil ++ // { ++ // "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", ++ // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", ++ // "httpMethod": "GET", ++ // "id": "container.projects.locations.clusters.checkAutopilotCompatibility", ++ // "parameterOrder": [ ++ // "name" ++ // ], ++ // "parameters": { ++ // "name": { ++ // "description": "The name (project, location, cluster) of the cluster to retrieve. 
Specified in the format `projects/*/locations/*/clusters/*`.", ++ // "location": "path", ++ // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", ++ // "required": true, ++ // "type": "string" ++ // } ++ // }, ++ // "path": "v1/{+name}:checkAutopilotCompatibility", ++ // "response": { ++ // "$ref": "CheckAutopilotCompatibilityResponse" ++ // }, ++ // "scopes": [ ++ // "https://www.googleapis.com/auth/cloud-platform" ++ // ] ++ // } ++ ++} ++ + // method id "container.projects.locations.clusters.completeIpRotation": + + type ProjectsLocationsClustersCompleteIpRotationCall struct { +@@ -9138,6 +9772,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + // "httpMethod": "POST", +@@ -14247,6 +14882,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* + } + return ret, nil + // { ++ // "deprecated": true, + // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", + // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + // "httpMethod": "POST", +diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go +index b328a7976ab..b5e38c66282 100644 +--- a/vendor/google.golang.org/api/googleapi/googleapi.go ++++ b/vendor/google.golang.org/api/googleapi/googleapi.go +@@ -11,7 +11,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "net/url" + "strings" +@@ -144,7 +143,7 @@ func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } +- slurp, err := ioutil.ReadAll(res.Body) ++ slurp, err := io.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) +@@ -184,7 +183,7 @@ func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } +- slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) ++ slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) + return &Error{ + Code: res.StatusCode, + Body: string(slurp), +diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go +new file mode 100644 +index 00000000000..cecbb9ba115 +--- /dev/null ++++ b/vendor/google.golang.org/api/internal/cba.go +@@ -0,0 +1,282 @@ ++// Copyright 2020 Google LLC. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// cba.go (certificate-based access) contains utils for implementing Device Certificate ++// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials ++// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. ++// ++// The overall logic for DCA is as follows: ++// 1. If both endpoint override and client certificate are specified, use them as is. ++// 2. If user does not specify client certificate, we will attempt to use default ++// client certificate. 
++// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if ++// client certificate is available and defaultEndpoint otherwise. ++// ++// Implications of the above logic: ++// 1. If the user specifies a non-mTLS endpoint override but client certificate is ++// available, we will pass along the cert anyway and let the server decide what to do. ++// 2. If the user specifies an mTLS endpoint override but client certificate is not ++// available, we will not fail-fast, but let backend throw error when connecting. ++// ++// If running within Google's cloud environment, and client certificate is not specified ++// and not available through DCA, we will try mTLS with credentials held by ++// the Secure Session Agent, which is part of Google's cloud infrastructure. ++// ++// We would like to avoid introducing client-side logic that parses whether the ++// endpoint override is an mTLS url, since the url pattern may change at anytime. ++// ++// This package is not intended for use by end developers. Use the ++// google.golang.org/api/option package to configure API clients. ++ ++// Package internal supports the options and transport packages. ++package internal ++ ++import ( ++ "context" ++ "crypto/tls" ++ "net" ++ "net/url" ++ "os" ++ "strings" ++ ++ "github.com/google/s2a-go" ++ "github.com/google/s2a-go/fallback" ++ "google.golang.org/api/internal/cert" ++ "google.golang.org/grpc/credentials" ++) ++ ++const ( ++ mTLSModeAlways = "always" ++ mTLSModeNever = "never" ++ mTLSModeAuto = "auto" ++ ++ // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. ++ googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" ++) ++ ++// getClientCertificateSourceAndEndpoint is a convenience function that invokes ++// getClientCertificateSource and getEndpoint sequentially and returns the client ++// cert source and endpoint as a tuple. ++func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { ++ clientCertSource, err := getClientCertificateSource(settings) ++ if err != nil { ++ return nil, "", err ++ } ++ endpoint, err := getEndpoint(settings, clientCertSource) ++ if err != nil { ++ return nil, "", err ++ } ++ return clientCertSource, endpoint, nil ++} ++ ++type transportConfig struct { ++ clientCertSource cert.Source // The client certificate source. ++ endpoint string // The corresponding endpoint to use based on client certificate source. ++ s2aAddress string // The S2A address if it can be used, otherwise an empty string. ++ s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. ++} ++ ++func getTransportConfig(settings *DialSettings) (*transportConfig, error) { ++ clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) ++ if err != nil { ++ return &transportConfig{ ++ clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", ++ }, err ++ } ++ defaultTransportConfig := transportConfig{ ++ clientCertSource: clientCertSource, ++ endpoint: endpoint, ++ s2aAddress: "", ++ s2aMTLSEndpoint: "", ++ } ++ ++ // Check the env to determine whether to use S2A. ++ if !isGoogleS2AEnabled() { ++ return &defaultTransportConfig, nil ++ } ++ ++ // If client cert is found, use that over S2A. ++ // If MTLS is not enabled for the endpoint, skip S2A. ++ if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { ++ return &defaultTransportConfig, nil ++ } ++ s2aMTLSEndpoint := settings.DefaultMTLSEndpoint ++ // If there is endpoint override, honor it. 
++ if settings.Endpoint != "" { ++ s2aMTLSEndpoint = endpoint ++ } ++ s2aAddress := GetS2AAddress() ++ if s2aAddress == "" { ++ return &defaultTransportConfig, nil ++ } ++ return &transportConfig{ ++ clientCertSource: clientCertSource, ++ endpoint: endpoint, ++ s2aAddress: s2aAddress, ++ s2aMTLSEndpoint: s2aMTLSEndpoint, ++ }, nil ++} ++ ++func isGoogleS2AEnabled() bool { ++ return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" ++} ++ ++// getClientCertificateSource returns a default client certificate source, if ++// not provided by the user. ++// ++// A nil default source can be returned if the source does not exist. Any exceptions ++// encountered while initializing the default source will be reported as client ++// error (ex. corrupt metadata file). ++// ++// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE ++// must be set to "true" to allow certificate to be used (including user provided ++// certificates). For details, see AIP-4114. ++func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { ++ if !isClientCertificateEnabled() { ++ return nil, nil ++ } else if settings.ClientCertSource != nil { ++ return settings.ClientCertSource, nil ++ } else { ++ return cert.DefaultSource() ++ } ++} ++ ++func isClientCertificateEnabled() bool { ++ useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") ++ // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. ++ return strings.ToLower(useClientCert) == "true" ++} ++ ++// getEndpoint returns the endpoint for the service, taking into account the ++// user-provided endpoint override "settings.Endpoint". ++// ++// If no endpoint override is specified, we will either return the default endpoint or ++// the default mTLS endpoint if a client certificate is available. ++// ++// You can override the default endpoint choice (mtls vs. regular) by setting the ++// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. ++// ++// If the endpoint override is an address (host:port) rather than full base ++// URL (ex. https://...), then the user-provided address will be merged into ++// the default endpoint. For example, WithEndpoint("myhost:8000") and ++// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" ++func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { ++ if settings.Endpoint == "" { ++ mtlsMode := getMTLSMode() ++ if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { ++ return settings.DefaultMTLSEndpoint, nil ++ } ++ return settings.DefaultEndpoint, nil ++ } ++ if strings.Contains(settings.Endpoint, "://") { ++ // User passed in a full URL path, use it verbatim. ++ return settings.Endpoint, nil ++ } ++ if settings.DefaultEndpoint == "" { ++ // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. ++ // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. ++ return settings.Endpoint, nil ++ } ++ ++ // Assume user-provided endpoint is host[:port], merge it with the default endpoint. ++ return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) ++} ++ ++func getMTLSMode() string { ++ mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") ++ if mode == "" { ++ mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. 
++ } ++ if mode == "" { ++ return mTLSModeAuto ++ } ++ return strings.ToLower(mode) ++} ++ ++func mergeEndpoints(baseURL, newHost string) (string, error) { ++ u, err := url.Parse(fixScheme(baseURL)) ++ if err != nil { ++ return "", err ++ } ++ return strings.Replace(baseURL, u.Host, newHost, 1), nil ++} ++ ++func fixScheme(baseURL string) string { ++ if !strings.Contains(baseURL, "://") { ++ return "https://" + baseURL ++ } ++ return baseURL ++} ++ ++// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the ++// corresponding endpoint to use for GRPC client. ++func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { ++ config, err := getTransportConfig(settings) ++ if err != nil { ++ return nil, "", err ++ } ++ ++ defaultTransportCreds := credentials.NewTLS(&tls.Config{ ++ GetClientCertificate: config.clientCertSource, ++ }) ++ if config.s2aAddress == "" { ++ return defaultTransportCreds, config.endpoint, nil ++ } ++ ++ var fallbackOpts *s2a.FallbackOptions ++ // In case of S2A failure, fall back to the endpoint that would've been used without S2A. ++ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { ++ fallbackOpts = &s2a.FallbackOptions{ ++ FallbackClientHandshakeFunc: fallbackHandshake, ++ } ++ } ++ ++ s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ ++ S2AAddress: config.s2aAddress, ++ FallbackOpts: fallbackOpts, ++ }) ++ if err != nil { ++ // Use default if we cannot initialize S2A client transport credentials. ++ return defaultTransportCreds, config.endpoint, nil ++ } ++ return s2aTransportCreds, config.s2aMTLSEndpoint, nil ++} ++ ++// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, ++// and the endpoint to use for HTTP client. ++func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { ++ config, err := getTransportConfig(settings) ++ if err != nil { ++ return nil, nil, "", err ++ } ++ ++ if config.s2aAddress == "" { ++ return config.clientCertSource, nil, config.endpoint, nil ++ } ++ ++ var fallbackOpts *s2a.FallbackOptions ++ // In case of S2A failure, fall back to the endpoint that would've been used without S2A. ++ if fallbackURL, err := url.Parse(config.endpoint); err == nil { ++ if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { ++ fallbackOpts = &s2a.FallbackOptions{ ++ FallbackDialer: &s2a.FallbackDialer{ ++ Dialer: fallbackDialer, ++ ServerAddr: fallbackServerAddr, ++ }, ++ } ++ } ++ } ++ ++ dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ ++ S2AAddress: config.s2aAddress, ++ FallbackOpts: fallbackOpts, ++ }) ++ return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil ++} ++ ++// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. ++var mtlsEndpointEnabledForS2A = func() bool { ++ // TODO(xmenxk): determine this via discovery config. 
++ return true ++} +diff --git a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +index 5913cab8017..afd79ffe2be 100644 +--- a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go ++++ b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +@@ -18,7 +18,6 @@ import ( + "encoding/json" + "errors" + "fmt" +- "io/ioutil" + "os" + "os/exec" + "os/user" +@@ -59,7 +58,7 @@ func NewSecureConnectSource(configFilePath string) (Source, error) { + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + +- file, err := ioutil.ReadFile(configFilePath) ++ file, err := os.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. +diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go +index 63c66092203..92b3acf6edf 100644 +--- a/vendor/google.golang.org/api/internal/creds.go ++++ b/vendor/google.golang.org/api/internal/creds.go +@@ -10,7 +10,6 @@ import ( + "encoding/json" + "errors" + "fmt" +- "io/ioutil" + "net" + "net/http" + "os" +@@ -48,7 +47,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) + } + if ds.CredentialsFile != "" { +- data, err := ioutil.ReadFile(ds.CredentialsFile) ++ data, err := os.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } +@@ -92,7 +91,7 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. +- clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) ++ clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } +diff --git a/vendor/google.golang.org/api/internal/dca.go b/vendor/google.golang.org/api/internal/dca.go +deleted file mode 100644 +index 204a3fd2f3f..00000000000 +--- a/vendor/google.golang.org/api/internal/dca.go ++++ /dev/null +@@ -1,144 +0,0 @@ +-// Copyright 2020 Google LLC. +-// Use of this source code is governed by a BSD-style +-// license that can be found in the LICENSE file. +- +-// Package dca contains utils for implementing Device Certificate +-// Authentication according to https://google.aip.dev/auth/4114 +-// +-// The overall logic for DCA is as follows: +-// 1. If both endpoint override and client certificate are specified, use them as is. +-// 2. If user does not specify client certificate, we will attempt to use default +-// client certificate. +-// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +-// client certificate is available and defaultEndpoint otherwise. +-// +-// Implications of the above logic: +-// 1. If the user specifies a non-mTLS endpoint override but client certificate is +-// available, we will pass along the cert anyway and let the server decide what to do. +-// 2. If the user specifies an mTLS endpoint override but client certificate is not +-// available, we will not fail-fast, but let backend throw error when connecting. 
+-// +-// We would like to avoid introducing client-side logic that parses whether the +-// endpoint override is an mTLS url, since the url pattern may change at anytime. +-// +-// This package is not intended for use by end developers. Use the +-// google.golang.org/api/option package to configure API clients. +- +-// Package internal supports the options and transport packages. +-package internal +- +-import ( +- "net/url" +- "os" +- "strings" +- +- "google.golang.org/api/internal/cert" +-) +- +-const ( +- mTLSModeAlways = "always" +- mTLSModeNever = "never" +- mTLSModeAuto = "auto" +-) +- +-// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +-// getClientCertificateSource and getEndpoint sequentially and returns the client +-// cert source and endpoint as a tuple. +-func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { +- clientCertSource, err := getClientCertificateSource(settings) +- if err != nil { +- return nil, "", err +- } +- endpoint, err := getEndpoint(settings, clientCertSource) +- if err != nil { +- return nil, "", err +- } +- return clientCertSource, endpoint, nil +-} +- +-// getClientCertificateSource returns a default client certificate source, if +-// not provided by the user. +-// +-// A nil default source can be returned if the source does not exist. Any exceptions +-// encountered while initializing the default source will be reported as client +-// error (ex. corrupt metadata file). +-// +-// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +-// must be set to "true" to allow certificate to be used (including user provided +-// certificates). For details, see AIP-4114. +-func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { +- if !isClientCertificateEnabled() { +- return nil, nil +- } else if settings.ClientCertSource != nil { +- return settings.ClientCertSource, nil +- } else { +- return cert.DefaultSource() +- } +-} +- +-func isClientCertificateEnabled() bool { +- useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") +- // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. +- return strings.ToLower(useClientCert) == "true" +-} +- +-// getEndpoint returns the endpoint for the service, taking into account the +-// user-provided endpoint override "settings.Endpoint". +-// +-// If no endpoint override is specified, we will either return the default endpoint or +-// the default mTLS endpoint if a client certificate is available. +-// +-// You can override the default endpoint choice (mtls vs. regular) by setting the +-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +-// +-// If the endpoint override is an address (host:port) rather than full base +-// URL (ex. https://...), then the user-provided address will be merged into +-// the default endpoint. For example, WithEndpoint("myhost:8000") and +-// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +-func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { +- if settings.Endpoint == "" { +- mtlsMode := getMTLSMode() +- if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { +- return settings.DefaultMTLSEndpoint, nil +- } +- return settings.DefaultEndpoint, nil +- } +- if strings.Contains(settings.Endpoint, "://") { +- // User passed in a full URL path, use it verbatim. 
+- return settings.Endpoint, nil +- } +- if settings.DefaultEndpoint == "" { +- // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. +- // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. +- return settings.Endpoint, nil +- } +- +- // Assume user-provided endpoint is host[:port], merge it with the default endpoint. +- return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +-} +- +-func getMTLSMode() string { +- mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") +- if mode == "" { +- mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. +- } +- if mode == "" { +- return mTLSModeAuto +- } +- return strings.ToLower(mode) +-} +- +-func mergeEndpoints(baseURL, newHost string) (string, error) { +- u, err := url.Parse(fixScheme(baseURL)) +- if err != nil { +- return "", err +- } +- return strings.Replace(baseURL, u.Host, newHost, 1), nil +-} +- +-func fixScheme(baseURL string) string { +- if !strings.Contains(baseURL, "://") { +- return "https://" + baseURL +- } +- return baseURL +-} +diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go +index 8356e7f27b0..c048a57084b 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/media.go ++++ b/vendor/google.golang.org/api/internal/gensupport/media.go +@@ -8,7 +8,6 @@ import ( + "bytes" + "fmt" + "io" +- "io/ioutil" + "mime" + "mime/multipart" + "net/http" +@@ -222,8 +221,8 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB + toCleanup = append(toCleanup, combined) + if fb != nil && fm != nil { + getBody = func() (io.ReadCloser, error) { +- rb := ioutil.NopCloser(fb()) +- rm := ioutil.NopCloser(fm()) ++ rb := io.NopCloser(fb()) ++ rm := io.NopCloser(fm()) + var mimeBoundary string + if _, params, err := mime.ParseMediaType(ctype); err == nil { + mimeBoundary = params["boundary"] +@@ -243,7 +242,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { +- rb := ioutil.NopCloser(fb()) ++ rb := io.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } +diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go +index f168ea6d2b7..08e7aacefb6 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/resumable.go ++++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go +@@ -43,8 +43,8 @@ type ResumableUpload struct { + // retries should happen. + ChunkRetryDeadline time.Duration + +- // Track current request invocation ID and attempt count for retry metric +- // headers. ++ // Track current request invocation ID and attempt count for retry metrics ++ // and idempotency headers. + invocationID string + attempts int + } +@@ -81,10 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + ++ // TODO(b/274504690): Consider dropping gccl-invocation-id key since it ++ // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). 
+ baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + ++ // Set idempotency token header which is used by GCS uploads. ++ req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) ++ + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in +diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go +index 85c7bcbfdfc..693a1b1abaf 100644 +--- a/vendor/google.golang.org/api/internal/gensupport/send.go ++++ b/vendor/google.golang.org/api/internal/gensupport/send.go +@@ -138,9 +138,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r + } + return resp, ctx.Err() + } ++ ++ // Set retry metrics and idempotency headers for GCS. ++ // TODO(b/274504690): Consider dropping gccl-invocation-id key since it ++ // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) ++ req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) + + resp, err = client.Do(req.WithContext(ctx)) + +diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go +index b465bbcd12e..4b2c775f210 100644 +--- a/vendor/google.golang.org/api/internal/impersonate/impersonate.go ++++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go +@@ -11,7 +11,6 @@ import ( + "encoding/json" + "fmt" + "io" +- "io/ioutil" + "net/http" + "time" + +@@ -105,7 +104,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() +- body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) ++ body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } +diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go +new file mode 100644 +index 00000000000..c5b421f5544 +--- /dev/null ++++ b/vendor/google.golang.org/api/internal/s2a.go +@@ -0,0 +1,136 @@ ++// Copyright 2023 Google LLC. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package internal ++ ++import ( ++ "encoding/json" ++ "log" ++ "sync" ++ "time" ++ ++ "cloud.google.com/go/compute/metadata" ++) ++ ++const configEndpointSuffix = "googleAutoMtlsConfiguration" ++ ++// The period an MTLS config can be reused before needing refresh. ++var configExpiry = time.Hour ++ ++// GetS2AAddress returns the S2A address to be reached via plaintext connection. 
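Several hunks earlier in this patch replace ioutil.ReadAll with io.ReadAll over an io.LimitReader, which bounds how much of a response body is read. A standalone sketch of that pattern, assuming Go 1.16+ and an illustrative URL and size cap:

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
    )

    func main() {
        resp, err := http.Get("https://example.com/") // illustrative URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        // Cap the read at 1 MiB so an oversized response cannot exhaust memory,
        // mirroring the io.LimitReader usage in the vendored code above.
        body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes\n", len(body))
    }
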
++func GetS2AAddress() string { ++ c, err := getMetadataMTLSAutoConfig().Config() ++ if err != nil { ++ return "" ++ } ++ if !c.Valid() { ++ return "" ++ } ++ return c.S2A.PlaintextAddress ++} ++ ++type mtlsConfigSource interface { ++ Config() (*mtlsConfig, error) ++} ++ ++// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. ++var ( ++ mdsMTLSAutoConfigSource mtlsConfigSource ++ once sync.Once ++) ++ ++// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. ++func getMetadataMTLSAutoConfig() mtlsConfigSource { ++ once.Do(func() { ++ mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ ++ src: &metadataMTLSAutoConfig{}, ++ } ++ }) ++ return mdsMTLSAutoConfigSource ++} ++ ++// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. ++// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. ++type reuseMTLSConfigSource struct { ++ src mtlsConfigSource // src.Config() is called when config is expired ++ mu sync.Mutex // mutex guards config ++ config *mtlsConfig // cached config ++} ++ ++func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { ++ cs.mu.Lock() ++ defer cs.mu.Unlock() ++ ++ if cs.config.Valid() { ++ return cs.config, nil ++ } ++ c, err := cs.src.Config() ++ if err != nil { ++ return nil, err ++ } ++ cs.config = c ++ return c, nil ++} ++ ++// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource ++// It has the logic to query MDS and return an mtlsConfig ++type metadataMTLSAutoConfig struct{} ++ ++var httpGetMetadataMTLSConfig = func() (string, error) { ++ return metadata.Get(configEndpointSuffix) ++} ++ ++func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { ++ resp, err := httpGetMetadataMTLSConfig() ++ if err != nil { ++ log.Printf("querying MTLS config from MDS endpoint failed: %v", err) ++ return defaultMTLSConfig(), nil ++ } ++ var config mtlsConfig ++ err = json.Unmarshal([]byte(resp), &config) ++ if err != nil { ++ log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) ++ return defaultMTLSConfig(), nil ++ } ++ ++ if config.S2A == nil { ++ log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) ++ return defaultMTLSConfig(), nil ++ } ++ ++ // set new expiry ++ config.Expiry = time.Now().Add(configExpiry) ++ return &config, nil ++} ++ ++func defaultMTLSConfig() *mtlsConfig { ++ return &mtlsConfig{ ++ S2A: &s2aAddresses{ ++ PlaintextAddress: "", ++ MTLSAddress: "", ++ }, ++ Expiry: time.Now().Add(configExpiry), ++ } ++} ++ ++// s2aAddresses contains the plaintext and/or MTLS S2A addresses. ++type s2aAddresses struct { ++ // PlaintextAddress is the plaintext address to reach S2A ++ PlaintextAddress string `json:"plaintext_address"` ++ // MTLSAddress is the MTLS address to reach S2A ++ MTLSAddress string `json:"mtls_address"` ++} ++ ++// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. 
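The reuseMTLSConfigSource above caches the fetched config under a mutex and only calls the underlying source again once the cached value has expired. A minimal sketch of the same expiry-based caching pattern follows; the names (cachedValue, Get, fetch) and the one-hour TTL are illustrative and not part of the vendored package.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // cachedValue holds a fetched value and refreshes it once the TTL has elapsed.
    type cachedValue struct {
        mu     sync.Mutex
        value  string
        expiry time.Time
        ttl    time.Duration
        fetch  func() (string, error)
    }

    func (c *cachedValue) Get() (string, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.value != "" && time.Now().Before(c.expiry) {
            return c.value, nil // still fresh, reuse the cached copy
        }
        v, err := c.fetch()
        if err != nil {
            return "", err
        }
        c.value = v
        c.expiry = time.Now().Add(c.ttl)
        return v, nil
    }

    func main() {
        c := &cachedValue{
            ttl:   time.Hour,
            fetch: func() (string, error) { return "config-from-metadata-server", nil },
        }
        v, _ := c.Get()
        fmt.Println(v)
    }
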
++type mtlsConfig struct { ++ S2A *s2aAddresses `json:"s2a"` ++ Expiry time.Time ++} ++ ++func (c *mtlsConfig) Valid() bool { ++ return c != nil && c.S2A != nil && !c.expired() ++} ++func (c *mtlsConfig) expired() bool { ++ return c.Expiry.Before(time.Now()) ++} +diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go +index 76efdb22772..3a3874df112 100644 +--- a/vendor/google.golang.org/api/internal/settings.go ++++ b/vendor/google.golang.org/api/internal/settings.go +@@ -46,6 +46,7 @@ type DialSettings struct { + SkipValidation bool + ImpersonationConfig *impersonate.Config + EnableDirectPath bool ++ EnableDirectPathXds bool + AllowNonDefaultServiceAccount bool + + // Google API system parameters. For more information please read: +diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go +index 7a4f6d8982e..46ad187ec11 100644 +--- a/vendor/google.golang.org/api/internal/version.go ++++ b/vendor/google.golang.org/api/internal/version.go +@@ -5,4 +5,4 @@ + package internal + + // Version is the current tagged release of the library. +-const Version = "0.114.0" ++const Version = "0.126.0" +diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +index 6490d4738f4..6a1aab9daf8 100644 +--- a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json ++++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +@@ -1217,7 +1217,7 @@ + "type": "string" + }, + "pageSize": { +- "description": "A positive number that is the maximum number of results to return.", ++ "description": "A positive number that is the maximum number of results to return. The default and maximum value is 10,000. If a page_size \u003c= 0 or \u003e 10,000 is submitted, will instead return a maximum of 10,000 results.", + "format": "int32", + "location": "query", + "type": "integer" +@@ -1505,7 +1505,7 @@ + ] + }, + "list": { +- "description": "Lists the notification channels that have been created for the project.", ++ "description": "Lists the notification channels that have been created for the project. To list the types of notification channels that are supported, use the ListNotificationChannelDescriptors method.", + "flatPath": "v3/projects/{projectsId}/notificationChannels", + "httpMethod": "GET", + "id": "monitoring.projects.notificationChannels.list", +@@ -2714,7 +2714,7 @@ + } + } + }, +- "revision": "20230212", ++ "revision": "20230531", + "rootUrl": "https://monitoring.googleapis.com/", + "schemas": { + "Aggregation": { +@@ -2843,7 +2843,7 @@ + "type": "string" + }, + "conditions": { +- "description": "A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition.", ++ "description": "A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. 
If condition_monitoring_query_language is present, it must be the only condition.", + "items": { + "$ref": "Condition" + }, +@@ -2854,7 +2854,7 @@ + "description": "A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored." + }, + "displayName": { +- "description": "A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.", ++ "description": "A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.The convention for the display_name of a PrometheusQueryLanguageCondition is \"/\", where the and should be taken from the corresponding Prometheus configuration file. This convention is not enforced. In any case the display_name is not a unique key of the AlertPolicy.", + "type": "string" + }, + "documentation": { +@@ -2884,12 +2884,12 @@ + "additionalProperties": { + "type": "string" + }, +- "description": "User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.", ++ "description": "User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.Note that Prometheus and are valid Prometheus label names (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). This means that they cannot be stored as is in user labels, because Prometheus labels may contain upper-case letters.", + "type": "object" + }, + "validity": { + "$ref": "Status", +- "description": "Read-only description of how the alert policy is invalid. OK if the alert policy is valid. If not OK, the alert policy will not generate incidents." ++ "description": "Read-only description of how the alert policy is invalid. This field is only set when the alert policy is invalid. An invalid alert policy will not generate incidents." + } + }, + "type": "object" +@@ -2903,6 +2903,13 @@ + "format": "google-duration", + "type": "string" + }, ++ "notificationChannelStrategy": { ++ "description": "Control how notifications will be sent out, on a per-channel basis.", ++ "items": { ++ "$ref": "NotificationChannelStrategy" ++ }, ++ "type": "array" ++ }, + "notificationRateLimit": { + "$ref": "NotificationRateLimit", + "description": "Required for alert policies with a LogMatch condition.This limit is not implemented for alert policies that are not log-based." +@@ -3504,7 +3511,7 @@ + "type": "object" + }, + "Exponential": { +- "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. 
Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", ++ "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i).Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "id": "Exponential", + "properties": { + "growthFactor": { +@@ -4033,7 +4040,7 @@ + "type": "object" + }, + "Linear": { +- "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", ++ "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i).Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "id": "Linear", + "properties": { + "numFiniteBuckets": { +@@ -4932,6 +4939,25 @@ + }, + "type": "object" + }, ++ "NotificationChannelStrategy": { ++ "description": "Control over how the notification channels in notification_channels are notified when this alert fires, on a per-channel basis.", ++ "id": "NotificationChannelStrategy", ++ "properties": { ++ "notificationChannelNames": { ++ "description": "The full REST resource name for the notification channels that these settings apply to. Each of these correspond to the name field in one of the NotificationChannel objects referenced in the notification_channels field of this AlertPolicy. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", ++ "items": { ++ "type": "string" ++ }, ++ "type": "array" ++ }, ++ "renotifyInterval": { ++ "description": "The frequency at which to send reminder notifications for open incidents.", ++ "format": "google-duration", ++ "type": "string" ++ } ++ }, ++ "type": "object" ++ }, + "NotificationRateLimit": { + "description": "Control over the rate of notifications sent to this alert policy's notification channels.", + "id": "NotificationRateLimit", +@@ -5624,6 +5650,10 @@ + "description": "A protocol buffer message type.", + "id": "Type", + "properties": { ++ "edition": { ++ "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", ++ "type": "string" ++ }, + "fields": { + "description": "The list of fields.", + "items": { +@@ -5657,11 +5687,13 @@ + "description": "The source syntax.", + "enum": [ + "SYNTAX_PROTO2", +- "SYNTAX_PROTO3" ++ "SYNTAX_PROTO3", ++ "SYNTAX_EDITIONS" + ], + "enumDescriptions": [ + "Syntax proto2.", +- "Syntax proto3." ++ "Syntax proto3.", ++ "Syntax editions." 
+ ], + "type": "string" + } +diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +index 6f700c3458a..0c5038567a4 100644 +--- a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go ++++ b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +@@ -77,6 +77,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "monitoring:v3" + const apiName = "monitoring" +@@ -688,7 +689,8 @@ type AlertPolicy struct { + // combined conditions evaluate to true, then an incident is created. A + // policy can have from one to six conditions. If + // condition_time_series_query_language is present, it must be the only +- // condition. ++ // condition. If condition_monitoring_query_language is present, it must ++ // be the only condition. + Conditions []*Condition `json:"conditions,omitempty"` + + // CreationRecord: A read-only record of the creation of the alerting +@@ -699,7 +701,11 @@ type AlertPolicy struct { + // DisplayName: A short name or phrase used to identify the policy in + // dashboards, notifications, and incidents. To avoid confusion, don't + // use the same display name for multiple policies in the same project. +- // The name is limited to 512 Unicode characters. ++ // The name is limited to 512 Unicode characters.The convention for the ++ // display_name of a PrometheusQueryLanguageCondition is "/", where the ++ // and should be taken from the corresponding Prometheus configuration ++ // file. This convention is not enforced. In any case the display_name ++ // is not a unique key of the AlertPolicy. + DisplayName string `json:"displayName,omitempty"` + + // Documentation: Documentation that is included with notifications and +@@ -746,12 +752,16 @@ type AlertPolicy struct { + // 64 entries. Each key and value is limited to 63 Unicode characters or + // 128 bytes, whichever is smaller. Labels and values can contain only + // lowercase letters, numerals, underscores, and dashes. Keys must begin +- // with a letter. ++ // with a letter.Note that Prometheus and are valid Prometheus label ++ // names ++ // (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). ++ // This means that they cannot be stored as is in user labels, because ++ // Prometheus labels may contain upper-case letters. + UserLabels map[string]string `json:"userLabels,omitempty"` + + // Validity: Read-only description of how the alert policy is invalid. +- // OK if the alert policy is valid. If not OK, the alert policy will not +- // generate incidents. ++ // This field is only set when the alert policy is invalid. An invalid ++ // alert policy will not generate incidents. + Validity *Status `json:"validity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the +@@ -788,6 +798,10 @@ type AlertStrategy struct { + // long, any open incidents will close + AutoClose string `json:"autoClose,omitempty"` + ++ // NotificationChannelStrategy: Control how notifications will be sent ++ // out, on a per-channel basis. ++ NotificationChannelStrategy []*NotificationChannelStrategy `json:"notificationChannelStrategy,omitempty"` ++ + // NotificationRateLimit: Required for alert policies with a LogMatch + // condition.This limit is not implemented for alert policies that are + // not log-based. 
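The AlertStrategy.NotificationChannelStrategy field added in the hunks above can be populated when building an alert policy with the monitoring v3 client. A hedged sketch; the project, channel name, and interval are placeholders, and a real policy also needs conditions and a combiner, which are omitted here for brevity.

    package main

    import (
        "fmt"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        // RenotifyInterval is a google-duration string, e.g. "1800s" for 30 minutes.
        policy := &monitoring.AlertPolicy{
            DisplayName: "example-policy",
            AlertStrategy: &monitoring.AlertStrategy{
                NotificationChannelStrategy: []*monitoring.NotificationChannelStrategy{{
                    NotificationChannelNames: []string{
                        "projects/my-project/notificationChannels/1234567890",
                    },
                    RenotifyInterval: "1800s",
                }},
            },
        }
        fmt.Println(policy.AlertStrategy.NotificationChannelStrategy[0].RenotifyInterval)
    }
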
+@@ -2004,12 +2018,29 @@ func (s *Explicit) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++func (s *Explicit) UnmarshalJSON(data []byte) error { ++ type NoMethod Explicit ++ var s1 struct { ++ Bounds []gensupport.JSONFloat64 `json:"bounds"` ++ *NoMethod ++ } ++ s1.NoMethod = (*NoMethod)(s) ++ if err := json.Unmarshal(data, &s1); err != nil { ++ return err ++ } ++ s.Bounds = make([]float64, len(s1.Bounds)) ++ for i := range s1.Bounds { ++ s.Bounds[i] = float64(s1.Bounds[i]) ++ } ++ return nil ++} ++ + // Exponential: Specifies an exponential sequence of buckets that have a + // width that is proportional to the value of the lower bound. Each + // bucket represents a constant relative uncertainty on a specific value + // in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket + // i has the following boundaries:Upper bound (0 <= i < N-1): scale * +-// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor ++// (growth_factor ^ i).Lower bound (1 <= i < N): scale * (growth_factor + // ^ (i - 1)). + type Exponential struct { + // GrowthFactor: Must be greater than 1. +@@ -2922,7 +2953,7 @@ func (s *LatencyCriteria) MarshalJSON() ([]byte, error) { + // constant absolute uncertainty on the specific value in the + // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has + // the following boundaries:Upper bound (0 <= i < N-1): offset + (width +-// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). ++// * i).Lower bound (1 <= i < N): offset + (width * (i - 1)). + type Linear struct { + // NumFiniteBuckets: Must be greater than 0. + NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` +@@ -4679,6 +4710,47 @@ func (s *NotificationChannelDescriptor) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + ++// NotificationChannelStrategy: Control over how the notification ++// channels in notification_channels are notified when this alert fires, ++// on a per-channel basis. ++type NotificationChannelStrategy struct { ++ // NotificationChannelNames: The full REST resource name for the ++ // notification channels that these settings apply to. Each of these ++ // correspond to the name field in one of the NotificationChannel ++ // objects referenced in the notification_channels field of this ++ // AlertPolicy. The format is: ++ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ++ NotificationChannelNames []string `json:"notificationChannelNames,omitempty"` ++ ++ // RenotifyInterval: The frequency at which to send reminder ++ // notifications for open incidents. ++ RenotifyInterval string `json:"renotifyInterval,omitempty"` ++ ++ // ForceSendFields is a list of field names (e.g. ++ // "NotificationChannelNames") to unconditionally include in API ++ // requests. By default, fields with empty or default values are omitted ++ // from API requests. However, any non-pointer, non-interface field ++ // appearing in ForceSendFields will be sent to the server regardless of ++ // whether the field is empty or not. This may be used to include empty ++ // fields in Patch requests. ++ ForceSendFields []string `json:"-"` ++ ++ // NullFields is a list of field names (e.g. "NotificationChannelNames") ++ // to include in API requests with the JSON null value. By default, ++ // fields with empty values are omitted from API requests. 
However, any ++ // field with an empty value appearing in NullFields will be sent to the ++ // server as null. It is an error if a field in this list has a ++ // non-empty value. This may be used to include null fields in Patch ++ // requests. ++ NullFields []string `json:"-"` ++} ++ ++func (s *NotificationChannelStrategy) MarshalJSON() ([]byte, error) { ++ type NoMethod NotificationChannelStrategy ++ raw := NoMethod(*s) ++ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) ++} ++ + // NotificationRateLimit: Control over the rate of notifications sent to + // this alert policy's notification channels. + type NotificationRateLimit struct { +@@ -6003,6 +6075,10 @@ func (s *Trigger) UnmarshalJSON(data []byte) error { + + // Type: A protocol buffer message type. + type Type struct { ++ // Edition: The source edition string, only valid when syntax is ++ // SYNTAX_EDITIONS. ++ Edition string `json:"edition,omitempty"` ++ + // Fields: The list of fields. + Fields []*Field `json:"fields,omitempty"` + +@@ -6024,9 +6100,10 @@ type Type struct { + // Possible values: + // "SYNTAX_PROTO2" - Syntax proto2. + // "SYNTAX_PROTO3" - Syntax proto3. ++ // "SYNTAX_EDITIONS" - Syntax editions. + Syntax string `json:"syntax,omitempty"` + +- // ForceSendFields is a list of field names (e.g. "Fields") to ++ // ForceSendFields is a list of field names (e.g. "Edition") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be +@@ -6034,8 +6111,8 @@ type Type struct { + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + +- // NullFields is a list of field names (e.g. "Fields") to include in API +- // requests with the JSON null value. By default, fields with empty ++ // NullFields is a list of field names (e.g. "Edition") to include in ++ // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. +@@ -11235,7 +11312,9 @@ func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetri + } + + // PageSize sets the optional parameter "pageSize": A positive number +-// that is the maximum number of results to return. ++// that is the maximum number of results to return. The default and ++// maximum value is 10,000. If a page_size <= 0 or > 10,000 is ++// submitted, will instead return a maximum of 10,000 results. + func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +@@ -11370,7 +11449,7 @@ func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*L + // "type": "string" + // }, + // "pageSize": { +- // "description": "A positive number that is the maximum number of results to return.", ++ // "description": "A positive number that is the maximum number of results to return. The default and maximum value is 10,000. 
If a page_size \u003c= 0 or \u003e 10,000 is submitted, will instead return a maximum of 10,000 results.", + // "format": "int32", + // "location": "query", + // "type": "integer" +@@ -12787,7 +12866,8 @@ type ProjectsNotificationChannelsListCall struct { + } + + // List: Lists the notification channels that have been created for the +-// project. ++// project. To list the types of notification channels that are ++// supported, use the ListNotificationChannelDescriptors method. + // + // - name: The project + // (https://cloud.google.com/monitoring/api/v3#project_name) on which +@@ -12938,7 +13018,7 @@ func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) + } + return ret, nil + // { +- // "description": "Lists the notification channels that have been created for the project.", ++ // "description": "Lists the notification channels that have been created for the project. To list the types of notification channels that are supported, use the ListNotificationChannelDescriptors method.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels", + // "httpMethod": "GET", + // "id": "monitoring.projects.notificationChannels.list", +diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go +index cc7ebfe277b..3b8461d1da9 100644 +--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go ++++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go +@@ -67,6 +67,21 @@ func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) + } + ++// EnableDirectPathXds returns a ClientOption that overrides the default ++// DirectPath type. It is only valid when DirectPath is enabled. ++// ++// It should only be used internally by generated clients. ++// This is an EXPERIMENTAL API and may be changed or removed in the future. ++func EnableDirectPathXds() option.ClientOption { ++ return enableDirectPathXds(true) ++} ++ ++type enableDirectPathXds bool ++ ++func (x enableDirectPathXds) Apply(o *internal.DialSettings) { ++ o.EnableDirectPathXds = bool(x) ++} ++ + // AllowNonDefaultServiceAccount returns a ClientOption that overrides the default + // requirement for using the default service account for DirectPath. + // +diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-api.json b/vendor/google.golang.org/api/tpu/v1/tpu-api.json +index 3dab07c96aa..820cbf9681d 100644 +--- a/vendor/google.golang.org/api/tpu/v1/tpu-api.json ++++ b/vendor/google.golang.org/api/tpu/v1/tpu-api.json +@@ -537,7 +537,7 @@ + ] + }, + "list": { +- "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", ++ "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "tpu.projects.locations.operations.list", +@@ -659,7 +659,7 @@ + } + } + }, +- "revision": "20220725", ++ "revision": "20230420", + "rootUrl": "https://tpu.googleapis.com/", + "schemas": { + "AcceleratorType": { +@@ -795,7 +795,7 @@ + "type": "object" + }, + "Location": { +- "description": "A resource that represents Google Cloud Platform location.", ++ "description": "A resource that represents a Google Cloud location.", + "id": "Location", + "properties": { + "displayName": { +diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +index 12b03349403..733464667d2 100644 +--- a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go ++++ b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +@@ -71,6 +71,7 @@ var _ = errors.New + var _ = strings.Replace + var _ = context.Canceled + var _ = internaloption.WithDefaultEndpoint ++var _ = internal.Version + + const apiId = "tpu:v1" + const apiName = "tpu" +@@ -442,7 +443,7 @@ func (s *ListTensorFlowVersionsResponse) MarshalJSON() ([]byte, error) { + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) + } + +-// Location: A resource that represents Google Cloud Platform location. ++// Location: A resource that represents a Google Cloud location. + type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". +@@ -3186,14 +3187,7 @@ type ProjectsLocationsOperationsListCall struct { + + // List: Lists operations that match the specified filter in the + // request. If the server doesn't support this method, it returns +-// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to +-// override the binding to use different resource name schemes, such as +-// `users/*/operations`. To override the binding, API services can add a +-// binding such as "/v1/{name=users/*}/operations" to their service +-// configuration. For backwards compatibility, the default name includes +-// the operations collection id, however overriding users must ensure +-// the name binding is the parent resource, without the operations +-// collection id. ++// `UNIMPLEMENTED`. + // + // - name: The name of the operation's parent resource. + func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { +@@ -3322,7 +3316,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( + } + return ret, nil + // { +- // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", ++ // "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + // "httpMethod": "GET", + // "id": "tpu.projects.locations.operations.list", +diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go +index 403509d08f6..eca0c3ba795 100644 +--- a/vendor/google.golang.org/api/transport/http/dial.go ++++ b/vendor/google.golang.org/api/transport/http/dial.go +@@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, + if err != nil { + return nil, "", err + } +- clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) ++ clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) + if err != nil { + return nil, "", err + } +@@ -41,7 +41,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, + if settings.HTTPClient != nil { + return settings.HTTPClient, endpoint, nil + } +- trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) ++ ++ trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) + if err != nil { + return nil, "", err + } +@@ -152,7 +153,7 @@ var appengineUrlfetchHook func(context.Context) http.RoundTripper + // Otherwise, use a default transport, taking most defaults from + // http.DefaultTransport. + // If TLSCertificate is available, set TLSClientConfig as well. +-func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { ++func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } +@@ -171,6 +172,10 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt + GetClientCertificate: clientCertSource, + } + } ++ if dialTLSContext != nil { ++ // If DialTLSContext is set, TLSClientConfig wil be ignored ++ trans.DialTLSContext = dialTLSContext ++ } + + configureHTTP2(trans) + +diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +new file mode 100644 +index 00000000000..4ec872e4606 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +@@ -0,0 +1,2822 @@ ++// Code generated by protoc-gen-go. DO NOT EDIT. ++// source: google.golang.org/appengine/internal/socket/socket_service.proto ++ ++package socket ++ ++import proto "github.com/golang/protobuf/proto" ++import fmt "fmt" ++import math "math" ++ ++// Reference imports to suppress errors if they are not otherwise used. ++var _ = proto.Marshal ++var _ = fmt.Errorf ++var _ = math.Inf ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the proto package it is being compiled against. ++// A compilation error at this line likely means your copy of the ++// proto package needs to be updated. 
++const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package ++ ++type RemoteSocketServiceError_ErrorCode int32 ++ ++const ( ++ RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 ++ RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 ++ RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 ++ RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 ++ RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 ++ RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 ++) ++ ++var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ ++ 1: "SYSTEM_ERROR", ++ 2: "GAI_ERROR", ++ 4: "FAILURE", ++ 5: "PERMISSION_DENIED", ++ 6: "INVALID_REQUEST", ++ 7: "SOCKET_CLOSED", ++} ++var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ ++ "SYSTEM_ERROR": 1, ++ "GAI_ERROR": 2, ++ "FAILURE": 4, ++ "PERMISSION_DENIED": 5, ++ "INVALID_REQUEST": 6, ++ "SOCKET_CLOSED": 7, ++} ++ ++func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { ++ p := new(RemoteSocketServiceError_ErrorCode) ++ *p = x ++ return p ++} ++func (x RemoteSocketServiceError_ErrorCode) String() string { ++ return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) ++} ++func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") ++ if err != nil { ++ return err ++ } ++ *x = RemoteSocketServiceError_ErrorCode(value) ++ return nil ++} ++func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} ++} ++ ++type RemoteSocketServiceError_SystemError int32 ++ ++const ( ++ RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 ++ RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 ++ RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 ++ RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 ++ RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 ++ RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 ++ RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 ++ RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 ++ RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 ++ RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 ++ RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 ++ RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 ++ RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 ++ RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 ++ RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 ++ RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 ++ RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 ++ RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 ++ RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 ++ RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 ++ RemoteSocketServiceError_SYS_ENODEV 
RemoteSocketServiceError_SystemError = 19 ++ RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 ++ RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 ++ RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 ++ RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 ++ RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 ++ RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 ++ RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 ++ RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 ++ RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 ++ RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 ++ RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 ++ RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 ++ RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 ++ RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 ++ RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 ++ RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 ++ RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 ++ RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 ++ RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 ++ RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 ++ RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 ++ RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 ++ RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 ++ RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 ++ RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 ++ RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 ++ RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 ++ RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 ++ RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 ++ RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 ++ RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 ++ RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 ++ RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 ++ RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 ++ RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 ++ RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 ++ RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 ++ RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 ++ RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 ++ RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 ++ RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 ++ RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 ++ RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 ++ 
RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 ++ RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 ++ RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 ++ RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 ++ RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 ++ RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 ++ RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 ++ RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 ++ RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 ++ RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 ++ RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 ++ RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 ++ RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 ++ RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 ++ RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 ++ RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 ++ RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 ++ RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 ++ RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 ++ RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 ++ RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 ++ RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 ++ RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 ++ RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 ++ RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 ++ RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 ++ RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 ++ RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 ++ RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 ++ RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 ++ RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 ++ RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 ++ RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 ++ RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 ++ RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 ++ RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 ++ RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 ++ RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 ++ RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 ++ RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 ++ RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 ++ RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 ++ RemoteSocketServiceError_SYS_ENOBUFS 
RemoteSocketServiceError_SystemError = 105 ++ RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 ++ RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 ++ RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 ++ RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 ++ RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 ++ RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 ++ RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 ++ RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 ++ RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 ++ RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 ++ RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 ++ RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 ++ RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 ++ RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 ++ RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 ++ RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 ++ RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 ++ RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 ++ RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 ++ RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 ++ RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 ++ RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 ++ RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 ++ RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 ++ RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 ++ RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 ++ RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 ++) ++ ++var RemoteSocketServiceError_SystemError_name = map[int32]string{ ++ 0: "SYS_SUCCESS", ++ 1: "SYS_EPERM", ++ 2: "SYS_ENOENT", ++ 3: "SYS_ESRCH", ++ 4: "SYS_EINTR", ++ 5: "SYS_EIO", ++ 6: "SYS_ENXIO", ++ 7: "SYS_E2BIG", ++ 8: "SYS_ENOEXEC", ++ 9: "SYS_EBADF", ++ 10: "SYS_ECHILD", ++ 11: "SYS_EAGAIN", ++ // Duplicate value: 11: "SYS_EWOULDBLOCK", ++ 12: "SYS_ENOMEM", ++ 13: "SYS_EACCES", ++ 14: "SYS_EFAULT", ++ 15: "SYS_ENOTBLK", ++ 16: "SYS_EBUSY", ++ 17: "SYS_EEXIST", ++ 18: "SYS_EXDEV", ++ 19: "SYS_ENODEV", ++ 20: "SYS_ENOTDIR", ++ 21: "SYS_EISDIR", ++ 22: "SYS_EINVAL", ++ 23: "SYS_ENFILE", ++ 24: "SYS_EMFILE", ++ 25: "SYS_ENOTTY", ++ 26: "SYS_ETXTBSY", ++ 27: "SYS_EFBIG", ++ 28: "SYS_ENOSPC", ++ 29: "SYS_ESPIPE", ++ 30: "SYS_EROFS", ++ 31: "SYS_EMLINK", ++ 32: "SYS_EPIPE", ++ 33: "SYS_EDOM", ++ 34: "SYS_ERANGE", ++ 35: "SYS_EDEADLK", ++ // Duplicate value: 35: "SYS_EDEADLOCK", ++ 36: "SYS_ENAMETOOLONG", ++ 37: "SYS_ENOLCK", ++ 38: "SYS_ENOSYS", ++ 39: "SYS_ENOTEMPTY", ++ 40: "SYS_ELOOP", ++ 42: "SYS_ENOMSG", ++ 43: "SYS_EIDRM", ++ 44: "SYS_ECHRNG", ++ 45: "SYS_EL2NSYNC", ++ 46: "SYS_EL3HLT", ++ 47: "SYS_EL3RST", ++ 48: "SYS_ELNRNG", ++ 49: "SYS_EUNATCH", ++ 50: "SYS_ENOCSI", ++ 51: 
"SYS_EL2HLT", ++ 52: "SYS_EBADE", ++ 53: "SYS_EBADR", ++ 54: "SYS_EXFULL", ++ 55: "SYS_ENOANO", ++ 56: "SYS_EBADRQC", ++ 57: "SYS_EBADSLT", ++ 59: "SYS_EBFONT", ++ 60: "SYS_ENOSTR", ++ 61: "SYS_ENODATA", ++ 62: "SYS_ETIME", ++ 63: "SYS_ENOSR", ++ 64: "SYS_ENONET", ++ 65: "SYS_ENOPKG", ++ 66: "SYS_EREMOTE", ++ 67: "SYS_ENOLINK", ++ 68: "SYS_EADV", ++ 69: "SYS_ESRMNT", ++ 70: "SYS_ECOMM", ++ 71: "SYS_EPROTO", ++ 72: "SYS_EMULTIHOP", ++ 73: "SYS_EDOTDOT", ++ 74: "SYS_EBADMSG", ++ 75: "SYS_EOVERFLOW", ++ 76: "SYS_ENOTUNIQ", ++ 77: "SYS_EBADFD", ++ 78: "SYS_EREMCHG", ++ 79: "SYS_ELIBACC", ++ 80: "SYS_ELIBBAD", ++ 81: "SYS_ELIBSCN", ++ 82: "SYS_ELIBMAX", ++ 83: "SYS_ELIBEXEC", ++ 84: "SYS_EILSEQ", ++ 85: "SYS_ERESTART", ++ 86: "SYS_ESTRPIPE", ++ 87: "SYS_EUSERS", ++ 88: "SYS_ENOTSOCK", ++ 89: "SYS_EDESTADDRREQ", ++ 90: "SYS_EMSGSIZE", ++ 91: "SYS_EPROTOTYPE", ++ 92: "SYS_ENOPROTOOPT", ++ 93: "SYS_EPROTONOSUPPORT", ++ 94: "SYS_ESOCKTNOSUPPORT", ++ 95: "SYS_EOPNOTSUPP", ++ // Duplicate value: 95: "SYS_ENOTSUP", ++ 96: "SYS_EPFNOSUPPORT", ++ 97: "SYS_EAFNOSUPPORT", ++ 98: "SYS_EADDRINUSE", ++ 99: "SYS_EADDRNOTAVAIL", ++ 100: "SYS_ENETDOWN", ++ 101: "SYS_ENETUNREACH", ++ 102: "SYS_ENETRESET", ++ 103: "SYS_ECONNABORTED", ++ 104: "SYS_ECONNRESET", ++ 105: "SYS_ENOBUFS", ++ 106: "SYS_EISCONN", ++ 107: "SYS_ENOTCONN", ++ 108: "SYS_ESHUTDOWN", ++ 109: "SYS_ETOOMANYREFS", ++ 110: "SYS_ETIMEDOUT", ++ 111: "SYS_ECONNREFUSED", ++ 112: "SYS_EHOSTDOWN", ++ 113: "SYS_EHOSTUNREACH", ++ 114: "SYS_EALREADY", ++ 115: "SYS_EINPROGRESS", ++ 116: "SYS_ESTALE", ++ 117: "SYS_EUCLEAN", ++ 118: "SYS_ENOTNAM", ++ 119: "SYS_ENAVAIL", ++ 120: "SYS_EISNAM", ++ 121: "SYS_EREMOTEIO", ++ 122: "SYS_EDQUOT", ++ 123: "SYS_ENOMEDIUM", ++ 124: "SYS_EMEDIUMTYPE", ++ 125: "SYS_ECANCELED", ++ 126: "SYS_ENOKEY", ++ 127: "SYS_EKEYEXPIRED", ++ 128: "SYS_EKEYREVOKED", ++ 129: "SYS_EKEYREJECTED", ++ 130: "SYS_EOWNERDEAD", ++ 131: "SYS_ENOTRECOVERABLE", ++ 132: "SYS_ERFKILL", ++} ++var RemoteSocketServiceError_SystemError_value = map[string]int32{ ++ "SYS_SUCCESS": 0, ++ "SYS_EPERM": 1, ++ "SYS_ENOENT": 2, ++ "SYS_ESRCH": 3, ++ "SYS_EINTR": 4, ++ "SYS_EIO": 5, ++ "SYS_ENXIO": 6, ++ "SYS_E2BIG": 7, ++ "SYS_ENOEXEC": 8, ++ "SYS_EBADF": 9, ++ "SYS_ECHILD": 10, ++ "SYS_EAGAIN": 11, ++ "SYS_EWOULDBLOCK": 11, ++ "SYS_ENOMEM": 12, ++ "SYS_EACCES": 13, ++ "SYS_EFAULT": 14, ++ "SYS_ENOTBLK": 15, ++ "SYS_EBUSY": 16, ++ "SYS_EEXIST": 17, ++ "SYS_EXDEV": 18, ++ "SYS_ENODEV": 19, ++ "SYS_ENOTDIR": 20, ++ "SYS_EISDIR": 21, ++ "SYS_EINVAL": 22, ++ "SYS_ENFILE": 23, ++ "SYS_EMFILE": 24, ++ "SYS_ENOTTY": 25, ++ "SYS_ETXTBSY": 26, ++ "SYS_EFBIG": 27, ++ "SYS_ENOSPC": 28, ++ "SYS_ESPIPE": 29, ++ "SYS_EROFS": 30, ++ "SYS_EMLINK": 31, ++ "SYS_EPIPE": 32, ++ "SYS_EDOM": 33, ++ "SYS_ERANGE": 34, ++ "SYS_EDEADLK": 35, ++ "SYS_EDEADLOCK": 35, ++ "SYS_ENAMETOOLONG": 36, ++ "SYS_ENOLCK": 37, ++ "SYS_ENOSYS": 38, ++ "SYS_ENOTEMPTY": 39, ++ "SYS_ELOOP": 40, ++ "SYS_ENOMSG": 42, ++ "SYS_EIDRM": 43, ++ "SYS_ECHRNG": 44, ++ "SYS_EL2NSYNC": 45, ++ "SYS_EL3HLT": 46, ++ "SYS_EL3RST": 47, ++ "SYS_ELNRNG": 48, ++ "SYS_EUNATCH": 49, ++ "SYS_ENOCSI": 50, ++ "SYS_EL2HLT": 51, ++ "SYS_EBADE": 52, ++ "SYS_EBADR": 53, ++ "SYS_EXFULL": 54, ++ "SYS_ENOANO": 55, ++ "SYS_EBADRQC": 56, ++ "SYS_EBADSLT": 57, ++ "SYS_EBFONT": 59, ++ "SYS_ENOSTR": 60, ++ "SYS_ENODATA": 61, ++ "SYS_ETIME": 62, ++ "SYS_ENOSR": 63, ++ "SYS_ENONET": 64, ++ "SYS_ENOPKG": 65, ++ "SYS_EREMOTE": 66, ++ "SYS_ENOLINK": 67, ++ "SYS_EADV": 68, ++ "SYS_ESRMNT": 69, ++ "SYS_ECOMM": 70, ++ "SYS_EPROTO": 71, ++ 
"SYS_EMULTIHOP": 72, ++ "SYS_EDOTDOT": 73, ++ "SYS_EBADMSG": 74, ++ "SYS_EOVERFLOW": 75, ++ "SYS_ENOTUNIQ": 76, ++ "SYS_EBADFD": 77, ++ "SYS_EREMCHG": 78, ++ "SYS_ELIBACC": 79, ++ "SYS_ELIBBAD": 80, ++ "SYS_ELIBSCN": 81, ++ "SYS_ELIBMAX": 82, ++ "SYS_ELIBEXEC": 83, ++ "SYS_EILSEQ": 84, ++ "SYS_ERESTART": 85, ++ "SYS_ESTRPIPE": 86, ++ "SYS_EUSERS": 87, ++ "SYS_ENOTSOCK": 88, ++ "SYS_EDESTADDRREQ": 89, ++ "SYS_EMSGSIZE": 90, ++ "SYS_EPROTOTYPE": 91, ++ "SYS_ENOPROTOOPT": 92, ++ "SYS_EPROTONOSUPPORT": 93, ++ "SYS_ESOCKTNOSUPPORT": 94, ++ "SYS_EOPNOTSUPP": 95, ++ "SYS_ENOTSUP": 95, ++ "SYS_EPFNOSUPPORT": 96, ++ "SYS_EAFNOSUPPORT": 97, ++ "SYS_EADDRINUSE": 98, ++ "SYS_EADDRNOTAVAIL": 99, ++ "SYS_ENETDOWN": 100, ++ "SYS_ENETUNREACH": 101, ++ "SYS_ENETRESET": 102, ++ "SYS_ECONNABORTED": 103, ++ "SYS_ECONNRESET": 104, ++ "SYS_ENOBUFS": 105, ++ "SYS_EISCONN": 106, ++ "SYS_ENOTCONN": 107, ++ "SYS_ESHUTDOWN": 108, ++ "SYS_ETOOMANYREFS": 109, ++ "SYS_ETIMEDOUT": 110, ++ "SYS_ECONNREFUSED": 111, ++ "SYS_EHOSTDOWN": 112, ++ "SYS_EHOSTUNREACH": 113, ++ "SYS_EALREADY": 114, ++ "SYS_EINPROGRESS": 115, ++ "SYS_ESTALE": 116, ++ "SYS_EUCLEAN": 117, ++ "SYS_ENOTNAM": 118, ++ "SYS_ENAVAIL": 119, ++ "SYS_EISNAM": 120, ++ "SYS_EREMOTEIO": 121, ++ "SYS_EDQUOT": 122, ++ "SYS_ENOMEDIUM": 123, ++ "SYS_EMEDIUMTYPE": 124, ++ "SYS_ECANCELED": 125, ++ "SYS_ENOKEY": 126, ++ "SYS_EKEYEXPIRED": 127, ++ "SYS_EKEYREVOKED": 128, ++ "SYS_EKEYREJECTED": 129, ++ "SYS_EOWNERDEAD": 130, ++ "SYS_ENOTRECOVERABLE": 131, ++ "SYS_ERFKILL": 132, ++} ++ ++func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { ++ p := new(RemoteSocketServiceError_SystemError) ++ *p = x ++ return p ++} ++func (x RemoteSocketServiceError_SystemError) String() string { ++ return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) ++} ++func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") ++ if err != nil { ++ return err ++ } ++ *x = RemoteSocketServiceError_SystemError(value) ++ return nil ++} ++func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} ++} ++ ++type CreateSocketRequest_SocketFamily int32 ++ ++const ( ++ CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 ++ CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 ++) ++ ++var CreateSocketRequest_SocketFamily_name = map[int32]string{ ++ 1: "IPv4", ++ 2: "IPv6", ++} ++var CreateSocketRequest_SocketFamily_value = map[string]int32{ ++ "IPv4": 1, ++ "IPv6": 2, ++} ++ ++func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { ++ p := new(CreateSocketRequest_SocketFamily) ++ *p = x ++ return p ++} ++func (x CreateSocketRequest_SocketFamily) String() string { ++ return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) ++} ++func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") ++ if err != nil { ++ return err ++ } ++ *x = CreateSocketRequest_SocketFamily(value) ++ return nil ++} ++func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} ++} ++ ++type CreateSocketRequest_SocketProtocol int32 ++ ++const ( ++ 
CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 ++ CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 ++) ++ ++var CreateSocketRequest_SocketProtocol_name = map[int32]string{ ++ 1: "TCP", ++ 2: "UDP", ++} ++var CreateSocketRequest_SocketProtocol_value = map[string]int32{ ++ "TCP": 1, ++ "UDP": 2, ++} ++ ++func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { ++ p := new(CreateSocketRequest_SocketProtocol) ++ *p = x ++ return p ++} ++func (x CreateSocketRequest_SocketProtocol) String() string { ++ return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) ++} ++func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") ++ if err != nil { ++ return err ++ } ++ *x = CreateSocketRequest_SocketProtocol(value) ++ return nil ++} ++func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} ++} ++ ++type SocketOption_SocketOptionLevel int32 ++ ++const ( ++ SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 ++ SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 ++ SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 ++ SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 ++) ++ ++var SocketOption_SocketOptionLevel_name = map[int32]string{ ++ 0: "SOCKET_SOL_IP", ++ 1: "SOCKET_SOL_SOCKET", ++ 6: "SOCKET_SOL_TCP", ++ 17: "SOCKET_SOL_UDP", ++} ++var SocketOption_SocketOptionLevel_value = map[string]int32{ ++ "SOCKET_SOL_IP": 0, ++ "SOCKET_SOL_SOCKET": 1, ++ "SOCKET_SOL_TCP": 6, ++ "SOCKET_SOL_UDP": 17, ++} ++ ++func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { ++ p := new(SocketOption_SocketOptionLevel) ++ *p = x ++ return p ++} ++func (x SocketOption_SocketOptionLevel) String() string { ++ return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) ++} ++func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") ++ if err != nil { ++ return err ++ } ++ *x = SocketOption_SocketOptionLevel(value) ++ return nil ++} ++func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} ++} ++ ++type SocketOption_SocketOptionName int32 ++ ++const ( ++ SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 ++ SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 ++ SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 ++ SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 ++ SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 ++ SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 ++ SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 ++ SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 ++ SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 ++ SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 ++ SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 ++ 
SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 ++ SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 ++ SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 ++ SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 ++ SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 ++ SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 ++ SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 ++ SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 ++ SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 ++ SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 ++ SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 ++ SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 ++) ++ ++var SocketOption_SocketOptionName_name = map[int32]string{ ++ 1: "SOCKET_SO_DEBUG", ++ 2: "SOCKET_SO_REUSEADDR", ++ 3: "SOCKET_SO_TYPE", ++ 4: "SOCKET_SO_ERROR", ++ 5: "SOCKET_SO_DONTROUTE", ++ 6: "SOCKET_SO_BROADCAST", ++ 7: "SOCKET_SO_SNDBUF", ++ 8: "SOCKET_SO_RCVBUF", ++ 9: "SOCKET_SO_KEEPALIVE", ++ 10: "SOCKET_SO_OOBINLINE", ++ 13: "SOCKET_SO_LINGER", ++ 20: "SOCKET_SO_RCVTIMEO", ++ 21: "SOCKET_SO_SNDTIMEO", ++ // Duplicate value: 1: "SOCKET_IP_TOS", ++ // Duplicate value: 2: "SOCKET_IP_TTL", ++ // Duplicate value: 3: "SOCKET_IP_HDRINCL", ++ // Duplicate value: 4: "SOCKET_IP_OPTIONS", ++ // Duplicate value: 1: "SOCKET_TCP_NODELAY", ++ // Duplicate value: 2: "SOCKET_TCP_MAXSEG", ++ // Duplicate value: 3: "SOCKET_TCP_CORK", ++ // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", ++ // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", ++ // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", ++ // Duplicate value: 7: "SOCKET_TCP_SYNCNT", ++ // Duplicate value: 8: "SOCKET_TCP_LINGER2", ++ // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", ++ // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", ++ 11: "SOCKET_TCP_INFO", ++ 12: "SOCKET_TCP_QUICKACK", ++} ++var SocketOption_SocketOptionName_value = map[string]int32{ ++ "SOCKET_SO_DEBUG": 1, ++ "SOCKET_SO_REUSEADDR": 2, ++ "SOCKET_SO_TYPE": 3, ++ "SOCKET_SO_ERROR": 4, ++ "SOCKET_SO_DONTROUTE": 5, ++ "SOCKET_SO_BROADCAST": 6, ++ "SOCKET_SO_SNDBUF": 7, ++ "SOCKET_SO_RCVBUF": 8, ++ "SOCKET_SO_KEEPALIVE": 9, ++ "SOCKET_SO_OOBINLINE": 10, ++ "SOCKET_SO_LINGER": 13, ++ "SOCKET_SO_RCVTIMEO": 20, ++ "SOCKET_SO_SNDTIMEO": 21, ++ "SOCKET_IP_TOS": 1, ++ "SOCKET_IP_TTL": 2, ++ "SOCKET_IP_HDRINCL": 3, ++ "SOCKET_IP_OPTIONS": 4, ++ "SOCKET_TCP_NODELAY": 1, ++ "SOCKET_TCP_MAXSEG": 2, ++ "SOCKET_TCP_CORK": 3, ++ "SOCKET_TCP_KEEPIDLE": 4, ++ "SOCKET_TCP_KEEPINTVL": 5, ++ "SOCKET_TCP_KEEPCNT": 6, ++ "SOCKET_TCP_SYNCNT": 7, ++ "SOCKET_TCP_LINGER2": 8, ++ "SOCKET_TCP_DEFER_ACCEPT": 9, ++ "SOCKET_TCP_WINDOW_CLAMP": 10, ++ "SOCKET_TCP_INFO": 11, ++ "SOCKET_TCP_QUICKACK": 12, ++} ++ ++func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { ++ p := new(SocketOption_SocketOptionName) ++ *p = x ++ return p ++} ++func (x SocketOption_SocketOptionName) String() string { ++ return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) ++} ++func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") ++ if err != nil 
{ ++ return err ++ } ++ *x = SocketOption_SocketOptionName(value) ++ return nil ++} ++func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} ++} ++ ++type ShutDownRequest_How int32 ++ ++const ( ++ ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 ++ ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 ++ ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 ++) ++ ++var ShutDownRequest_How_name = map[int32]string{ ++ 1: "SOCKET_SHUT_RD", ++ 2: "SOCKET_SHUT_WR", ++ 3: "SOCKET_SHUT_RDWR", ++} ++var ShutDownRequest_How_value = map[string]int32{ ++ "SOCKET_SHUT_RD": 1, ++ "SOCKET_SHUT_WR": 2, ++ "SOCKET_SHUT_RDWR": 3, ++} ++ ++func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { ++ p := new(ShutDownRequest_How) ++ *p = x ++ return p ++} ++func (x ShutDownRequest_How) String() string { ++ return proto.EnumName(ShutDownRequest_How_name, int32(x)) ++} ++func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") ++ if err != nil { ++ return err ++ } ++ *x = ShutDownRequest_How(value) ++ return nil ++} ++func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} ++} ++ ++type ReceiveRequest_Flags int32 ++ ++const ( ++ ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 ++ ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 ++) ++ ++var ReceiveRequest_Flags_name = map[int32]string{ ++ 1: "MSG_OOB", ++ 2: "MSG_PEEK", ++} ++var ReceiveRequest_Flags_value = map[string]int32{ ++ "MSG_OOB": 1, ++ "MSG_PEEK": 2, ++} ++ ++func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { ++ p := new(ReceiveRequest_Flags) ++ *p = x ++ return p ++} ++func (x ReceiveRequest_Flags) String() string { ++ return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) ++} ++func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") ++ if err != nil { ++ return err ++ } ++ *x = ReceiveRequest_Flags(value) ++ return nil ++} ++func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} ++} ++ ++type PollEvent_PollEventFlag int32 ++ ++const ( ++ PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 ++ PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 ++ PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 ++ PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 ++ PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 ++ PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 ++ PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 ++ PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 ++ PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 ++ PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 ++ PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 ++ PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 ++ PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 ++ PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 ++) ++ ++var PollEvent_PollEventFlag_name = map[int32]string{ ++ 0: "SOCKET_POLLNONE", ++ 1: "SOCKET_POLLIN", ++ 2: "SOCKET_POLLPRI", ++ 4: "SOCKET_POLLOUT", ++ 8: "SOCKET_POLLERR", ++ 16: "SOCKET_POLLHUP", ++ 32: "SOCKET_POLLNVAL", ++ 64: "SOCKET_POLLRDNORM", ++ 128: "SOCKET_POLLRDBAND", ++ 256: "SOCKET_POLLWRNORM", ++ 512: 
"SOCKET_POLLWRBAND", ++ 1024: "SOCKET_POLLMSG", ++ 4096: "SOCKET_POLLREMOVE", ++ 8192: "SOCKET_POLLRDHUP", ++} ++var PollEvent_PollEventFlag_value = map[string]int32{ ++ "SOCKET_POLLNONE": 0, ++ "SOCKET_POLLIN": 1, ++ "SOCKET_POLLPRI": 2, ++ "SOCKET_POLLOUT": 4, ++ "SOCKET_POLLERR": 8, ++ "SOCKET_POLLHUP": 16, ++ "SOCKET_POLLNVAL": 32, ++ "SOCKET_POLLRDNORM": 64, ++ "SOCKET_POLLRDBAND": 128, ++ "SOCKET_POLLWRNORM": 256, ++ "SOCKET_POLLWRBAND": 512, ++ "SOCKET_POLLMSG": 1024, ++ "SOCKET_POLLREMOVE": 4096, ++ "SOCKET_POLLRDHUP": 8192, ++} ++ ++func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { ++ p := new(PollEvent_PollEventFlag) ++ *p = x ++ return p ++} ++func (x PollEvent_PollEventFlag) String() string { ++ return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) ++} ++func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") ++ if err != nil { ++ return err ++ } ++ *x = PollEvent_PollEventFlag(value) ++ return nil ++} ++func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} ++} ++ ++type ResolveReply_ErrorCode int32 ++ ++const ( ++ ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 ++ ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 ++ ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 ++ ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 ++ ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 ++ ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 ++ ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 ++ ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 ++ ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 ++ ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 ++ ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 ++ ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 ++ ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 ++ ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 ++ ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 ++) ++ ++var ResolveReply_ErrorCode_name = map[int32]string{ ++ 1: "SOCKET_EAI_ADDRFAMILY", ++ 2: "SOCKET_EAI_AGAIN", ++ 3: "SOCKET_EAI_BADFLAGS", ++ 4: "SOCKET_EAI_FAIL", ++ 5: "SOCKET_EAI_FAMILY", ++ 6: "SOCKET_EAI_MEMORY", ++ 7: "SOCKET_EAI_NODATA", ++ 8: "SOCKET_EAI_NONAME", ++ 9: "SOCKET_EAI_SERVICE", ++ 10: "SOCKET_EAI_SOCKTYPE", ++ 11: "SOCKET_EAI_SYSTEM", ++ 12: "SOCKET_EAI_BADHINTS", ++ 13: "SOCKET_EAI_PROTOCOL", ++ 14: "SOCKET_EAI_OVERFLOW", ++ 15: "SOCKET_EAI_MAX", ++} ++var ResolveReply_ErrorCode_value = map[string]int32{ ++ "SOCKET_EAI_ADDRFAMILY": 1, ++ "SOCKET_EAI_AGAIN": 2, ++ "SOCKET_EAI_BADFLAGS": 3, ++ "SOCKET_EAI_FAIL": 4, ++ "SOCKET_EAI_FAMILY": 5, ++ "SOCKET_EAI_MEMORY": 6, ++ "SOCKET_EAI_NODATA": 7, ++ "SOCKET_EAI_NONAME": 8, ++ "SOCKET_EAI_SERVICE": 9, ++ "SOCKET_EAI_SOCKTYPE": 10, ++ "SOCKET_EAI_SYSTEM": 11, ++ "SOCKET_EAI_BADHINTS": 12, ++ "SOCKET_EAI_PROTOCOL": 13, ++ "SOCKET_EAI_OVERFLOW": 14, ++ "SOCKET_EAI_MAX": 15, ++} ++ ++func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { ++ p := new(ResolveReply_ErrorCode) ++ *p = x ++ return p ++} ++func (x ResolveReply_ErrorCode) String() string { ++ return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) ++} ++func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { ++ value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, 
data, "ResolveReply_ErrorCode") ++ if err != nil { ++ return err ++ } ++ *x = ResolveReply_ErrorCode(value) ++ return nil ++} ++func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} ++} ++ ++type RemoteSocketServiceError struct { ++ SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` ++ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } ++func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } ++func (*RemoteSocketServiceError) ProtoMessage() {} ++func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} ++} ++func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) ++} ++func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) ++} ++func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) ++} ++func (m *RemoteSocketServiceError) XXX_Size() int { ++ return xxx_messageInfo_RemoteSocketServiceError.Size(m) ++} ++func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { ++ xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo ++ ++const Default_RemoteSocketServiceError_SystemError int32 = 0 ++ ++func (m *RemoteSocketServiceError) GetSystemError() int32 { ++ if m != nil && m.SystemError != nil { ++ return *m.SystemError ++ } ++ return Default_RemoteSocketServiceError_SystemError ++} ++ ++func (m *RemoteSocketServiceError) GetErrorDetail() string { ++ if m != nil && m.ErrorDetail != nil { ++ return *m.ErrorDetail ++ } ++ return "" ++} ++ ++type AddressPort struct { ++ Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` ++ PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` ++ HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AddressPort) Reset() { *m = AddressPort{} } ++func (m *AddressPort) String() string { return proto.CompactTextString(m) } ++func (*AddressPort) ProtoMessage() {} ++func (*AddressPort) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} ++} ++func (m *AddressPort) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AddressPort.Unmarshal(m, b) ++} ++func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) ++} ++func (dst *AddressPort) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AddressPort.Merge(dst, src) ++} ++func (m *AddressPort) XXX_Size() int { ++ return xxx_messageInfo_AddressPort.Size(m) ++} ++func (m *AddressPort) XXX_DiscardUnknown() { ++ xxx_messageInfo_AddressPort.DiscardUnknown(m) ++} ++ 
++var xxx_messageInfo_AddressPort proto.InternalMessageInfo ++ ++func (m *AddressPort) GetPort() int32 { ++ if m != nil && m.Port != nil { ++ return *m.Port ++ } ++ return 0 ++} ++ ++func (m *AddressPort) GetPackedAddress() []byte { ++ if m != nil { ++ return m.PackedAddress ++ } ++ return nil ++} ++ ++func (m *AddressPort) GetHostnameHint() string { ++ if m != nil && m.HostnameHint != nil { ++ return *m.HostnameHint ++ } ++ return "" ++} ++ ++type CreateSocketRequest struct { ++ Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` ++ Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` ++ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` ++ RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` ++ AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` ++ ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } ++func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } ++func (*CreateSocketRequest) ProtoMessage() {} ++func (*CreateSocketRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} ++} ++func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) ++} ++func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) ++} ++func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CreateSocketRequest.Merge(dst, src) ++} ++func (m *CreateSocketRequest) XXX_Size() int { ++ return xxx_messageInfo_CreateSocketRequest.Size(m) ++} ++func (m *CreateSocketRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo ++ ++const Default_CreateSocketRequest_ListenBacklog int32 = 0 ++ ++func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { ++ if m != nil && m.Family != nil { ++ return *m.Family ++ } ++ return CreateSocketRequest_IPv4 ++} ++ ++func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { ++ if m != nil && m.Protocol != nil { ++ return *m.Protocol ++ } ++ return CreateSocketRequest_TCP ++} ++ ++func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { ++ if m != nil { ++ return m.SocketOptions ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetListenBacklog() int32 { ++ if m != nil && m.ListenBacklog != nil { ++ return *m.ListenBacklog ++ } ++ return 
Default_CreateSocketRequest_ListenBacklog ++} ++ ++func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { ++ if m != nil { ++ return m.RemoteIp ++ } ++ return nil ++} ++ ++func (m *CreateSocketRequest) GetAppId() string { ++ if m != nil && m.AppId != nil { ++ return *m.AppId ++ } ++ return "" ++} ++ ++func (m *CreateSocketRequest) GetProjectId() int64 { ++ if m != nil && m.ProjectId != nil { ++ return *m.ProjectId ++ } ++ return 0 ++} ++ ++type CreateSocketReply struct { ++ SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ proto.XXX_InternalExtensions `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } ++func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } ++func (*CreateSocketReply) ProtoMessage() {} ++func (*CreateSocketReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} ++} ++ ++var extRange_CreateSocketReply = []proto.ExtensionRange{ ++ {Start: 1000, End: 536870911}, ++} ++ ++func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { ++ return extRange_CreateSocketReply ++} ++func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) ++} ++func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) ++} ++func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CreateSocketReply.Merge(dst, src) ++} ++func (m *CreateSocketReply) XXX_Size() int { ++ return xxx_messageInfo_CreateSocketReply.Size(m) ++} ++func (m *CreateSocketReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo ++ ++func (m *CreateSocketReply) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *CreateSocketReply) GetServerAddress() *AddressPort { ++ if m != nil { ++ return m.ServerAddress ++ } ++ return nil ++} ++ ++func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type BindRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *BindRequest) Reset() { *m = BindRequest{} } ++func (m *BindRequest) String() string { return proto.CompactTextString(m) } ++func (*BindRequest) ProtoMessage() {} ++func (*BindRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} ++} ++func (m *BindRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_BindRequest.Unmarshal(m, b) ++} 
++func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) ++} ++func (dst *BindRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BindRequest.Merge(dst, src) ++} ++func (m *BindRequest) XXX_Size() int { ++ return xxx_messageInfo_BindRequest.Size(m) ++} ++func (m *BindRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BindRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BindRequest proto.InternalMessageInfo ++ ++func (m *BindRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *BindRequest) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type BindReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *BindReply) Reset() { *m = BindReply{} } ++func (m *BindReply) String() string { return proto.CompactTextString(m) } ++func (*BindReply) ProtoMessage() {} ++func (*BindReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} ++} ++func (m *BindReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_BindReply.Unmarshal(m, b) ++} ++func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) ++} ++func (dst *BindReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BindReply.Merge(dst, src) ++} ++func (m *BindReply) XXX_Size() int { ++ return xxx_messageInfo_BindReply.Size(m) ++} ++func (m *BindReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_BindReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BindReply proto.InternalMessageInfo ++ ++func (m *BindReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type GetSocketNameRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } ++func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } ++func (*GetSocketNameRequest) ProtoMessage() {} ++func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} ++} ++func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) ++} ++func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) ++} ++func (m *GetSocketNameRequest) XXX_Size() int { ++ return xxx_messageInfo_GetSocketNameRequest.Size(m) ++} ++func (m *GetSocketNameRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo ++ ++func (m *GetSocketNameRequest) GetSocketDescriptor() string { ++ 
if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++type GetSocketNameReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } ++func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } ++func (*GetSocketNameReply) ProtoMessage() {} ++func (*GetSocketNameReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} ++} ++func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) ++} ++func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketNameReply.Merge(dst, src) ++} ++func (m *GetSocketNameReply) XXX_Size() int { ++ return xxx_messageInfo_GetSocketNameReply.Size(m) ++} ++func (m *GetSocketNameReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo ++ ++func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type GetPeerNameRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } ++func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } ++func (*GetPeerNameRequest) ProtoMessage() {} ++func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} ++} ++func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) ++} ++func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) ++} ++func (m *GetPeerNameRequest) XXX_Size() int { ++ return xxx_messageInfo_GetPeerNameRequest.Size(m) ++} ++func (m *GetPeerNameRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo ++ ++func (m *GetPeerNameRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++type GetPeerNameReply struct { ++ PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } ++func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } ++func (*GetPeerNameReply) ProtoMessage() {} ++func (*GetPeerNameReply) 
Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} ++} ++func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) ++} ++func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) ++} ++func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetPeerNameReply.Merge(dst, src) ++} ++func (m *GetPeerNameReply) XXX_Size() int { ++ return xxx_messageInfo_GetPeerNameReply.Size(m) ++} ++func (m *GetPeerNameReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo ++ ++func (m *GetPeerNameReply) GetPeerIp() *AddressPort { ++ if m != nil { ++ return m.PeerIp ++ } ++ return nil ++} ++ ++type SocketOption struct { ++ Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` ++ Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` ++ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SocketOption) Reset() { *m = SocketOption{} } ++func (m *SocketOption) String() string { return proto.CompactTextString(m) } ++func (*SocketOption) ProtoMessage() {} ++func (*SocketOption) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} ++} ++func (m *SocketOption) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SocketOption.Unmarshal(m, b) ++} ++func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) ++} ++func (dst *SocketOption) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SocketOption.Merge(dst, src) ++} ++func (m *SocketOption) XXX_Size() int { ++ return xxx_messageInfo_SocketOption.Size(m) ++} ++func (m *SocketOption) XXX_DiscardUnknown() { ++ xxx_messageInfo_SocketOption.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SocketOption proto.InternalMessageInfo ++ ++func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { ++ if m != nil && m.Level != nil { ++ return *m.Level ++ } ++ return SocketOption_SOCKET_SOL_IP ++} ++ ++func (m *SocketOption) GetOption() SocketOption_SocketOptionName { ++ if m != nil && m.Option != nil { ++ return *m.Option ++ } ++ return SocketOption_SOCKET_SO_DEBUG ++} ++ ++func (m *SocketOption) GetValue() []byte { ++ if m != nil { ++ return m.Value ++ } ++ return nil ++} ++ ++type SetSocketOptionsRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } ++func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } ++func (*SetSocketOptionsRequest) ProtoMessage() {} ++func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { ++ return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{11} ++} ++func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) ++} ++func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) ++} ++func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) ++} ++func (m *SetSocketOptionsRequest) XXX_Size() int { ++ return xxx_messageInfo_SetSocketOptionsRequest.Size(m) ++} ++func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo ++ ++func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type SetSocketOptionsReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } ++func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } ++func (*SetSocketOptionsReply) ProtoMessage() {} ++func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} ++} ++func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) ++} ++func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) ++} ++func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) ++} ++func (m *SetSocketOptionsReply) XXX_Size() int { ++ return xxx_messageInfo_SetSocketOptionsReply.Size(m) ++} ++func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo ++ ++type GetSocketOptionsRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } ++func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } ++func (*GetSocketOptionsRequest) ProtoMessage() {} ++func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} ++} ++func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) ++} ++func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { ++ 
xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) ++} ++func (m *GetSocketOptionsRequest) XXX_Size() int { ++ return xxx_messageInfo_GetSocketOptionsRequest.Size(m) ++} ++func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo ++ ++func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type GetSocketOptionsReply struct { ++ Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } ++func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } ++func (*GetSocketOptionsReply) ProtoMessage() {} ++func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} ++} ++func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) ++} ++func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) ++} ++func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) ++} ++func (m *GetSocketOptionsReply) XXX_Size() int { ++ return xxx_messageInfo_GetSocketOptionsReply.Size(m) ++} ++func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo ++ ++func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { ++ if m != nil { ++ return m.Options ++ } ++ return nil ++} ++ ++type ConnectRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } ++func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } ++func (*ConnectRequest) ProtoMessage() {} ++func (*ConnectRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} ++} ++func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) ++} ++func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) ++} ++func (dst *ConnectRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ConnectRequest.Merge(dst, src) ++} ++func (m *ConnectRequest) XXX_Size() int { ++ return xxx_messageInfo_ConnectRequest.Size(m) ++} ++func (m *ConnectRequest) XXX_DiscardUnknown() { ++ 
xxx_messageInfo_ConnectRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo ++ ++const Default_ConnectRequest_TimeoutSeconds float64 = -1 ++ ++func (m *ConnectRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ConnectRequest) GetRemoteIp() *AddressPort { ++ if m != nil { ++ return m.RemoteIp ++ } ++ return nil ++} ++ ++func (m *ConnectRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_ConnectRequest_TimeoutSeconds ++} ++ ++type ConnectReply struct { ++ ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ proto.XXX_InternalExtensions `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ConnectReply) Reset() { *m = ConnectReply{} } ++func (m *ConnectReply) String() string { return proto.CompactTextString(m) } ++func (*ConnectReply) ProtoMessage() {} ++func (*ConnectReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} ++} ++ ++var extRange_ConnectReply = []proto.ExtensionRange{ ++ {Start: 1000, End: 536870911}, ++} ++ ++func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { ++ return extRange_ConnectReply ++} ++func (m *ConnectReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ConnectReply.Unmarshal(m, b) ++} ++func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) ++} ++func (dst *ConnectReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ConnectReply.Merge(dst, src) ++} ++func (m *ConnectReply) XXX_Size() int { ++ return xxx_messageInfo_ConnectReply.Size(m) ++} ++func (m *ConnectReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ConnectReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ConnectReply proto.InternalMessageInfo ++ ++func (m *ConnectReply) GetProxyExternalIp() *AddressPort { ++ if m != nil { ++ return m.ProxyExternalIp ++ } ++ return nil ++} ++ ++type ListenRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ListenRequest) Reset() { *m = ListenRequest{} } ++func (m *ListenRequest) String() string { return proto.CompactTextString(m) } ++func (*ListenRequest) ProtoMessage() {} ++func (*ListenRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} ++} ++func (m *ListenRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ListenRequest.Unmarshal(m, b) ++} ++func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) ++} ++func (dst *ListenRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ListenRequest.Merge(dst, src) ++} ++func (m *ListenRequest) XXX_Size() int { ++ return xxx_messageInfo_ListenRequest.Size(m) ++} ++func (m *ListenRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ListenRequest.DiscardUnknown(m) ++} ++ ++var 
xxx_messageInfo_ListenRequest proto.InternalMessageInfo ++ ++func (m *ListenRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ListenRequest) GetBacklog() int32 { ++ if m != nil && m.Backlog != nil { ++ return *m.Backlog ++ } ++ return 0 ++} ++ ++type ListenReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ListenReply) Reset() { *m = ListenReply{} } ++func (m *ListenReply) String() string { return proto.CompactTextString(m) } ++func (*ListenReply) ProtoMessage() {} ++func (*ListenReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} ++} ++func (m *ListenReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ListenReply.Unmarshal(m, b) ++} ++func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) ++} ++func (dst *ListenReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ListenReply.Merge(dst, src) ++} ++func (m *ListenReply) XXX_Size() int { ++ return xxx_messageInfo_ListenReply.Size(m) ++} ++func (m *ListenReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ListenReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ListenReply proto.InternalMessageInfo ++ ++type AcceptRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } ++func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } ++func (*AcceptRequest) ProtoMessage() {} ++func (*AcceptRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} ++} ++func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) ++} ++func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) ++} ++func (dst *AcceptRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AcceptRequest.Merge(dst, src) ++} ++func (m *AcceptRequest) XXX_Size() int { ++ return xxx_messageInfo_AcceptRequest.Size(m) ++} ++func (m *AcceptRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_AcceptRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo ++ ++const Default_AcceptRequest_TimeoutSeconds float64 = -1 ++ ++func (m *AcceptRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *AcceptRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_AcceptRequest_TimeoutSeconds ++} ++ ++type AcceptReply struct { ++ NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` ++ RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} 
`json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *AcceptReply) Reset() { *m = AcceptReply{} } ++func (m *AcceptReply) String() string { return proto.CompactTextString(m) } ++func (*AcceptReply) ProtoMessage() {} ++func (*AcceptReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} ++} ++func (m *AcceptReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_AcceptReply.Unmarshal(m, b) ++} ++func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) ++} ++func (dst *AcceptReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_AcceptReply.Merge(dst, src) ++} ++func (m *AcceptReply) XXX_Size() int { ++ return xxx_messageInfo_AcceptReply.Size(m) ++} ++func (m *AcceptReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_AcceptReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_AcceptReply proto.InternalMessageInfo ++ ++func (m *AcceptReply) GetNewSocketDescriptor() []byte { ++ if m != nil { ++ return m.NewSocketDescriptor ++ } ++ return nil ++} ++ ++func (m *AcceptReply) GetRemoteAddress() *AddressPort { ++ if m != nil { ++ return m.RemoteAddress ++ } ++ return nil ++} ++ ++type ShutDownRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` ++ SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } ++func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } ++func (*ShutDownRequest) ProtoMessage() {} ++func (*ShutDownRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} ++} ++func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) ++} ++func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) ++} ++func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ShutDownRequest.Merge(dst, src) ++} ++func (m *ShutDownRequest) XXX_Size() int { ++ return xxx_messageInfo_ShutDownRequest.Size(m) ++} ++func (m *ShutDownRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo ++ ++func (m *ShutDownRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ShutDownRequest) GetHow() ShutDownRequest_How { ++ if m != nil && m.How != nil { ++ return *m.How ++ } ++ return ShutDownRequest_SOCKET_SHUT_RD ++} ++ ++func (m *ShutDownRequest) GetSendOffset() int64 { ++ if m != nil && m.SendOffset != nil { ++ return *m.SendOffset ++ } ++ return 0 ++} ++ ++type ShutDownReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } ++func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } ++func 
(*ShutDownReply) ProtoMessage() {} ++func (*ShutDownReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} ++} ++func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) ++} ++func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) ++} ++func (dst *ShutDownReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ShutDownReply.Merge(dst, src) ++} ++func (m *ShutDownReply) XXX_Size() int { ++ return xxx_messageInfo_ShutDownReply.Size(m) ++} ++func (m *ShutDownReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ShutDownReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo ++ ++type CloseRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CloseRequest) Reset() { *m = CloseRequest{} } ++func (m *CloseRequest) String() string { return proto.CompactTextString(m) } ++func (*CloseRequest) ProtoMessage() {} ++func (*CloseRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} ++} ++func (m *CloseRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CloseRequest.Unmarshal(m, b) ++} ++func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) ++} ++func (dst *CloseRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CloseRequest.Merge(dst, src) ++} ++func (m *CloseRequest) XXX_Size() int { ++ return xxx_messageInfo_CloseRequest.Size(m) ++} ++func (m *CloseRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CloseRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CloseRequest proto.InternalMessageInfo ++ ++const Default_CloseRequest_SendOffset int64 = -1 ++ ++func (m *CloseRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *CloseRequest) GetSendOffset() int64 { ++ if m != nil && m.SendOffset != nil { ++ return *m.SendOffset ++ } ++ return Default_CloseRequest_SendOffset ++} ++ ++type CloseReply struct { ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *CloseReply) Reset() { *m = CloseReply{} } ++func (m *CloseReply) String() string { return proto.CompactTextString(m) } ++func (*CloseReply) ProtoMessage() {} ++func (*CloseReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} ++} ++func (m *CloseReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_CloseReply.Unmarshal(m, b) ++} ++func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) ++} ++func (dst *CloseReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CloseReply.Merge(dst, src) ++} ++func (m *CloseReply) XXX_Size() int { ++ return xxx_messageInfo_CloseReply.Size(m) ++} ++func (m *CloseReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_CloseReply.DiscardUnknown(m) ++} ++ ++var 
xxx_messageInfo_CloseReply proto.InternalMessageInfo ++ ++type SendRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` ++ StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` ++ Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` ++ SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SendRequest) Reset() { *m = SendRequest{} } ++func (m *SendRequest) String() string { return proto.CompactTextString(m) } ++func (*SendRequest) ProtoMessage() {} ++func (*SendRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} ++} ++func (m *SendRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SendRequest.Unmarshal(m, b) ++} ++func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) ++} ++func (dst *SendRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SendRequest.Merge(dst, src) ++} ++func (m *SendRequest) XXX_Size() int { ++ return xxx_messageInfo_SendRequest.Size(m) ++} ++func (m *SendRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SendRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SendRequest proto.InternalMessageInfo ++ ++const Default_SendRequest_Flags int32 = 0 ++const Default_SendRequest_TimeoutSeconds float64 = -1 ++ ++func (m *SendRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *SendRequest) GetData() []byte { ++ if m != nil { ++ return m.Data ++ } ++ return nil ++} ++ ++func (m *SendRequest) GetStreamOffset() int64 { ++ if m != nil && m.StreamOffset != nil { ++ return *m.StreamOffset ++ } ++ return 0 ++} ++ ++func (m *SendRequest) GetFlags() int32 { ++ if m != nil && m.Flags != nil { ++ return *m.Flags ++ } ++ return Default_SendRequest_Flags ++} ++ ++func (m *SendRequest) GetSendTo() *AddressPort { ++ if m != nil { ++ return m.SendTo ++ } ++ return nil ++} ++ ++func (m *SendRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_SendRequest_TimeoutSeconds ++} ++ ++type SendReply struct { ++ DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *SendReply) Reset() { *m = SendReply{} } ++func (m *SendReply) String() string { return proto.CompactTextString(m) } ++func (*SendReply) ProtoMessage() {} ++func (*SendReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} ++} ++func (m *SendReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_SendReply.Unmarshal(m, b) ++} ++func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) ++} ++func (dst *SendReply) 
XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SendReply.Merge(dst, src) ++} ++func (m *SendReply) XXX_Size() int { ++ return xxx_messageInfo_SendReply.Size(m) ++} ++func (m *SendReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_SendReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SendReply proto.InternalMessageInfo ++ ++func (m *SendReply) GetDataSent() int32 { ++ if m != nil && m.DataSent != nil { ++ return *m.DataSent ++ } ++ return 0 ++} ++ ++type ReceiveRequest struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` ++ Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } ++func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } ++func (*ReceiveRequest) ProtoMessage() {} ++func (*ReceiveRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} ++} ++func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) ++} ++func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) ++} ++func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ReceiveRequest.Merge(dst, src) ++} ++func (m *ReceiveRequest) XXX_Size() int { ++ return xxx_messageInfo_ReceiveRequest.Size(m) ++} ++func (m *ReceiveRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo ++ ++const Default_ReceiveRequest_Flags int32 = 0 ++const Default_ReceiveRequest_TimeoutSeconds float64 = -1 ++ ++func (m *ReceiveRequest) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *ReceiveRequest) GetDataSize() int32 { ++ if m != nil && m.DataSize != nil { ++ return *m.DataSize ++ } ++ return 0 ++} ++ ++func (m *ReceiveRequest) GetFlags() int32 { ++ if m != nil && m.Flags != nil { ++ return *m.Flags ++ } ++ return Default_ReceiveRequest_Flags ++} ++ ++func (m *ReceiveRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_ReceiveRequest_TimeoutSeconds ++} ++ ++type ReceiveReply struct { ++ StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` ++ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` ++ ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` ++ BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } ++func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } ++func (*ReceiveReply) ProtoMessage() {} ++func (*ReceiveReply) 
Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} ++} ++func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) ++} ++func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) ++} ++func (dst *ReceiveReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ReceiveReply.Merge(dst, src) ++} ++func (m *ReceiveReply) XXX_Size() int { ++ return xxx_messageInfo_ReceiveReply.Size(m) ++} ++func (m *ReceiveReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ReceiveReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo ++ ++func (m *ReceiveReply) GetStreamOffset() int64 { ++ if m != nil && m.StreamOffset != nil { ++ return *m.StreamOffset ++ } ++ return 0 ++} ++ ++func (m *ReceiveReply) GetData() []byte { ++ if m != nil { ++ return m.Data ++ } ++ return nil ++} ++ ++func (m *ReceiveReply) GetReceivedFrom() *AddressPort { ++ if m != nil { ++ return m.ReceivedFrom ++ } ++ return nil ++} ++ ++func (m *ReceiveReply) GetBufferSize() int32 { ++ if m != nil && m.BufferSize != nil { ++ return *m.BufferSize ++ } ++ return 0 ++} ++ ++type PollEvent struct { ++ SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` ++ RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` ++ ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollEvent) Reset() { *m = PollEvent{} } ++func (m *PollEvent) String() string { return proto.CompactTextString(m) } ++func (*PollEvent) ProtoMessage() {} ++func (*PollEvent) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} ++} ++func (m *PollEvent) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollEvent.Unmarshal(m, b) ++} ++func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) ++} ++func (dst *PollEvent) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollEvent.Merge(dst, src) ++} ++func (m *PollEvent) XXX_Size() int { ++ return xxx_messageInfo_PollEvent.Size(m) ++} ++func (m *PollEvent) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollEvent.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollEvent proto.InternalMessageInfo ++ ++func (m *PollEvent) GetSocketDescriptor() string { ++ if m != nil && m.SocketDescriptor != nil { ++ return *m.SocketDescriptor ++ } ++ return "" ++} ++ ++func (m *PollEvent) GetRequestedEvents() int32 { ++ if m != nil && m.RequestedEvents != nil { ++ return *m.RequestedEvents ++ } ++ return 0 ++} ++ ++func (m *PollEvent) GetObservedEvents() int32 { ++ if m != nil && m.ObservedEvents != nil { ++ return *m.ObservedEvents ++ } ++ return 0 ++} ++ ++type PollRequest struct { ++ Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` ++ TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollRequest) Reset() { *m = 
PollRequest{} } ++func (m *PollRequest) String() string { return proto.CompactTextString(m) } ++func (*PollRequest) ProtoMessage() {} ++func (*PollRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} ++} ++func (m *PollRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollRequest.Unmarshal(m, b) ++} ++func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) ++} ++func (dst *PollRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollRequest.Merge(dst, src) ++} ++func (m *PollRequest) XXX_Size() int { ++ return xxx_messageInfo_PollRequest.Size(m) ++} ++func (m *PollRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollRequest proto.InternalMessageInfo ++ ++const Default_PollRequest_TimeoutSeconds float64 = -1 ++ ++func (m *PollRequest) GetEvents() []*PollEvent { ++ if m != nil { ++ return m.Events ++ } ++ return nil ++} ++ ++func (m *PollRequest) GetTimeoutSeconds() float64 { ++ if m != nil && m.TimeoutSeconds != nil { ++ return *m.TimeoutSeconds ++ } ++ return Default_PollRequest_TimeoutSeconds ++} ++ ++type PollReply struct { ++ Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *PollReply) Reset() { *m = PollReply{} } ++func (m *PollReply) String() string { return proto.CompactTextString(m) } ++func (*PollReply) ProtoMessage() {} ++func (*PollReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} ++} ++func (m *PollReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_PollReply.Unmarshal(m, b) ++} ++func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) ++} ++func (dst *PollReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_PollReply.Merge(dst, src) ++} ++func (m *PollReply) XXX_Size() int { ++ return xxx_messageInfo_PollReply.Size(m) ++} ++func (m *PollReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_PollReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_PollReply proto.InternalMessageInfo ++ ++func (m *PollReply) GetEvents() []*PollEvent { ++ if m != nil { ++ return m.Events ++ } ++ return nil ++} ++ ++type ResolveRequest struct { ++ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` ++ AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } ++func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } ++func (*ResolveRequest) ProtoMessage() {} ++func (*ResolveRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} ++} ++func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) ++} ++func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) ++} ++func (dst *ResolveRequest) 
XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ResolveRequest.Merge(dst, src) ++} ++func (m *ResolveRequest) XXX_Size() int { ++ return xxx_messageInfo_ResolveRequest.Size(m) ++} ++func (m *ResolveRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ResolveRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo ++ ++func (m *ResolveRequest) GetName() string { ++ if m != nil && m.Name != nil { ++ return *m.Name ++ } ++ return "" ++} ++ ++func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { ++ if m != nil { ++ return m.AddressFamilies ++ } ++ return nil ++} ++ ++type ResolveReply struct { ++ PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` ++ CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` ++ Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` ++ XXX_NoUnkeyedLiteral struct{} `json:"-"` ++ XXX_unrecognized []byte `json:"-"` ++ XXX_sizecache int32 `json:"-"` ++} ++ ++func (m *ResolveReply) Reset() { *m = ResolveReply{} } ++func (m *ResolveReply) String() string { return proto.CompactTextString(m) } ++func (*ResolveReply) ProtoMessage() {} ++func (*ResolveReply) Descriptor() ([]byte, []int) { ++ return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} ++} ++func (m *ResolveReply) XXX_Unmarshal(b []byte) error { ++ return xxx_messageInfo_ResolveReply.Unmarshal(m, b) ++} ++func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) ++} ++func (dst *ResolveReply) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ResolveReply.Merge(dst, src) ++} ++func (m *ResolveReply) XXX_Size() int { ++ return xxx_messageInfo_ResolveReply.Size(m) ++} ++func (m *ResolveReply) XXX_DiscardUnknown() { ++ xxx_messageInfo_ResolveReply.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ResolveReply proto.InternalMessageInfo ++ ++func (m *ResolveReply) GetPackedAddress() [][]byte { ++ if m != nil { ++ return m.PackedAddress ++ } ++ return nil ++} ++ ++func (m *ResolveReply) GetCanonicalName() string { ++ if m != nil && m.CanonicalName != nil { ++ return *m.CanonicalName ++ } ++ return "" ++} ++ ++func (m *ResolveReply) GetAliases() []string { ++ if m != nil { ++ return m.Aliases ++ } ++ return nil ++} ++ ++func init() { ++ proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") ++ proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") ++ proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") ++ proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") ++ proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") ++ proto.RegisterType((*BindReply)(nil), "appengine.BindReply") ++ proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") ++ proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") ++ proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") ++ proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") ++ proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") ++ proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") ++ proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") ++ proto.RegisterType((*GetSocketOptionsRequest)(nil), 
"appengine.GetSocketOptionsRequest") ++ proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") ++ proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") ++ proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") ++ proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") ++ proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") ++ proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") ++ proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") ++ proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") ++ proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") ++ proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") ++ proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") ++ proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") ++ proto.RegisterType((*SendReply)(nil), "appengine.SendReply") ++ proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") ++ proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") ++ proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") ++ proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") ++ proto.RegisterType((*PollReply)(nil), "appengine.PollReply") ++ proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") ++ proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") ++} ++ ++func init() { ++ proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) ++} ++ ++var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ ++ // 3088 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, ++ 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, ++ 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, ++ 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, ++ 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, ++ 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, ++ 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, ++ 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, ++ 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, ++ 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, ++ 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, ++ 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, ++ 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, ++ 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, ++ 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, ++ 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, ++ 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, ++ 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, ++ 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 
0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, ++ 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, ++ 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, ++ 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, ++ 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, ++ 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, ++ 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, ++ 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, ++ 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, ++ 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, ++ 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, ++ 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, ++ 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, ++ 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, ++ 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, ++ 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, ++ 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, ++ 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, ++ 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, ++ 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, ++ 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, ++ 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, ++ 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, ++ 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, ++ 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, ++ 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, ++ 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, ++ 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, ++ 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, ++ 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, ++ 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, ++ 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, ++ 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, ++ 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, ++ 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, ++ 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, ++ 0x98, 0x13, 0xc2, 0xb1, 
0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, ++ 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, ++ 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, ++ 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, ++ 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, ++ 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, ++ 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, ++ 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, ++ 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, ++ 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, ++ 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, ++ 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, ++ 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, ++ 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, ++ 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, ++ 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, ++ 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, ++ 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, ++ 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, ++ 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, ++ 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, ++ 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, ++ 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, ++ 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, ++ 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, ++ 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, ++ 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, ++ 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, ++ 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, ++ 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, ++ 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, ++ 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, ++ 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, ++ 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, ++ 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, ++ 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, ++ 0xed, 0x7c, 
0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, ++ 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, ++ 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, ++ 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, ++ 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, ++ 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, ++ 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, ++ 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, ++ 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, ++ 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, ++ 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, ++ 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, ++ 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, ++ 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, ++ 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, ++ 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, ++ 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, ++ 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, ++ 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, ++ 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, ++ 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, ++ 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, ++ 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, ++ 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, ++ 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, ++ 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, ++ 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, ++ 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, ++ 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, ++ 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, ++ 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, ++ 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, ++ 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, ++ 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, ++ 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, ++ 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, ++ 
0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, ++ 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, ++ 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, ++ 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, ++ 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, ++ 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, ++ 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, ++ 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, ++ 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, ++ 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, ++ 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, ++ 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, ++ 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, ++ 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, ++ 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, ++ 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, ++ 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, ++ 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, ++ 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, ++ 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, ++ 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, ++ 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, ++ 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, ++ 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, ++ 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, ++ 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, ++ 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, ++ 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, ++ 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, ++ 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, ++ 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, ++ 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, ++ 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, ++ 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, ++ 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, ++ 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 
0x86, ++ 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, ++ 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, ++ 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, ++ 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, ++ 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, ++ 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, ++ 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, ++ 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, ++ 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, ++ 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, ++ 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, ++ 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, ++ 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, ++ 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, ++ 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, ++ 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, ++ 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, ++ 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, ++ 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, ++ 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, ++ 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, ++ 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, ++ 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, ++ 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, ++ 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, ++ 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, ++ 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, ++ 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, ++ 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, ++ 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, ++ 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, ++} +diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +new file mode 100644 +index 00000000000..2fcc7953dc0 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +@@ -0,0 +1,460 @@ ++syntax = "proto2"; ++option go_package = "socket"; ++ ++package appengine; ++ ++message RemoteSocketServiceError { ++ enum ErrorCode { ++ 
SYSTEM_ERROR = 1; ++ GAI_ERROR = 2; ++ FAILURE = 4; ++ PERMISSION_DENIED = 5; ++ INVALID_REQUEST = 6; ++ SOCKET_CLOSED = 7; ++ } ++ ++ enum SystemError { ++ option allow_alias = true; ++ ++ SYS_SUCCESS = 0; ++ SYS_EPERM = 1; ++ SYS_ENOENT = 2; ++ SYS_ESRCH = 3; ++ SYS_EINTR = 4; ++ SYS_EIO = 5; ++ SYS_ENXIO = 6; ++ SYS_E2BIG = 7; ++ SYS_ENOEXEC = 8; ++ SYS_EBADF = 9; ++ SYS_ECHILD = 10; ++ SYS_EAGAIN = 11; ++ SYS_EWOULDBLOCK = 11; ++ SYS_ENOMEM = 12; ++ SYS_EACCES = 13; ++ SYS_EFAULT = 14; ++ SYS_ENOTBLK = 15; ++ SYS_EBUSY = 16; ++ SYS_EEXIST = 17; ++ SYS_EXDEV = 18; ++ SYS_ENODEV = 19; ++ SYS_ENOTDIR = 20; ++ SYS_EISDIR = 21; ++ SYS_EINVAL = 22; ++ SYS_ENFILE = 23; ++ SYS_EMFILE = 24; ++ SYS_ENOTTY = 25; ++ SYS_ETXTBSY = 26; ++ SYS_EFBIG = 27; ++ SYS_ENOSPC = 28; ++ SYS_ESPIPE = 29; ++ SYS_EROFS = 30; ++ SYS_EMLINK = 31; ++ SYS_EPIPE = 32; ++ SYS_EDOM = 33; ++ SYS_ERANGE = 34; ++ SYS_EDEADLK = 35; ++ SYS_EDEADLOCK = 35; ++ SYS_ENAMETOOLONG = 36; ++ SYS_ENOLCK = 37; ++ SYS_ENOSYS = 38; ++ SYS_ENOTEMPTY = 39; ++ SYS_ELOOP = 40; ++ SYS_ENOMSG = 42; ++ SYS_EIDRM = 43; ++ SYS_ECHRNG = 44; ++ SYS_EL2NSYNC = 45; ++ SYS_EL3HLT = 46; ++ SYS_EL3RST = 47; ++ SYS_ELNRNG = 48; ++ SYS_EUNATCH = 49; ++ SYS_ENOCSI = 50; ++ SYS_EL2HLT = 51; ++ SYS_EBADE = 52; ++ SYS_EBADR = 53; ++ SYS_EXFULL = 54; ++ SYS_ENOANO = 55; ++ SYS_EBADRQC = 56; ++ SYS_EBADSLT = 57; ++ SYS_EBFONT = 59; ++ SYS_ENOSTR = 60; ++ SYS_ENODATA = 61; ++ SYS_ETIME = 62; ++ SYS_ENOSR = 63; ++ SYS_ENONET = 64; ++ SYS_ENOPKG = 65; ++ SYS_EREMOTE = 66; ++ SYS_ENOLINK = 67; ++ SYS_EADV = 68; ++ SYS_ESRMNT = 69; ++ SYS_ECOMM = 70; ++ SYS_EPROTO = 71; ++ SYS_EMULTIHOP = 72; ++ SYS_EDOTDOT = 73; ++ SYS_EBADMSG = 74; ++ SYS_EOVERFLOW = 75; ++ SYS_ENOTUNIQ = 76; ++ SYS_EBADFD = 77; ++ SYS_EREMCHG = 78; ++ SYS_ELIBACC = 79; ++ SYS_ELIBBAD = 80; ++ SYS_ELIBSCN = 81; ++ SYS_ELIBMAX = 82; ++ SYS_ELIBEXEC = 83; ++ SYS_EILSEQ = 84; ++ SYS_ERESTART = 85; ++ SYS_ESTRPIPE = 86; ++ SYS_EUSERS = 87; ++ SYS_ENOTSOCK = 88; ++ SYS_EDESTADDRREQ = 89; ++ SYS_EMSGSIZE = 90; ++ SYS_EPROTOTYPE = 91; ++ SYS_ENOPROTOOPT = 92; ++ SYS_EPROTONOSUPPORT = 93; ++ SYS_ESOCKTNOSUPPORT = 94; ++ SYS_EOPNOTSUPP = 95; ++ SYS_ENOTSUP = 95; ++ SYS_EPFNOSUPPORT = 96; ++ SYS_EAFNOSUPPORT = 97; ++ SYS_EADDRINUSE = 98; ++ SYS_EADDRNOTAVAIL = 99; ++ SYS_ENETDOWN = 100; ++ SYS_ENETUNREACH = 101; ++ SYS_ENETRESET = 102; ++ SYS_ECONNABORTED = 103; ++ SYS_ECONNRESET = 104; ++ SYS_ENOBUFS = 105; ++ SYS_EISCONN = 106; ++ SYS_ENOTCONN = 107; ++ SYS_ESHUTDOWN = 108; ++ SYS_ETOOMANYREFS = 109; ++ SYS_ETIMEDOUT = 110; ++ SYS_ECONNREFUSED = 111; ++ SYS_EHOSTDOWN = 112; ++ SYS_EHOSTUNREACH = 113; ++ SYS_EALREADY = 114; ++ SYS_EINPROGRESS = 115; ++ SYS_ESTALE = 116; ++ SYS_EUCLEAN = 117; ++ SYS_ENOTNAM = 118; ++ SYS_ENAVAIL = 119; ++ SYS_EISNAM = 120; ++ SYS_EREMOTEIO = 121; ++ SYS_EDQUOT = 122; ++ SYS_ENOMEDIUM = 123; ++ SYS_EMEDIUMTYPE = 124; ++ SYS_ECANCELED = 125; ++ SYS_ENOKEY = 126; ++ SYS_EKEYEXPIRED = 127; ++ SYS_EKEYREVOKED = 128; ++ SYS_EKEYREJECTED = 129; ++ SYS_EOWNERDEAD = 130; ++ SYS_ENOTRECOVERABLE = 131; ++ SYS_ERFKILL = 132; ++ } ++ ++ optional int32 system_error = 1 [default=0]; ++ optional string error_detail = 2; ++} ++ ++message AddressPort { ++ required int32 port = 1; ++ optional bytes packed_address = 2; ++ ++ optional string hostname_hint = 3; ++} ++ ++ ++ ++message CreateSocketRequest { ++ enum SocketFamily { ++ IPv4 = 1; ++ IPv6 = 2; ++ } ++ ++ enum SocketProtocol { ++ TCP = 1; ++ UDP = 2; ++ } ++ ++ required SocketFamily family = 1; ++ required SocketProtocol protocol = 2; 
++ ++ repeated SocketOption socket_options = 3; ++ ++ optional AddressPort proxy_external_ip = 4; ++ ++ optional int32 listen_backlog = 5 [default=0]; ++ ++ optional AddressPort remote_ip = 6; ++ ++ optional string app_id = 9; ++ ++ optional int64 project_id = 10; ++} ++ ++message CreateSocketReply { ++ optional string socket_descriptor = 1; ++ ++ optional AddressPort server_address = 3; ++ ++ optional AddressPort proxy_external_ip = 4; ++ ++ extensions 1000 to max; ++} ++ ++ ++ ++message BindRequest { ++ required string socket_descriptor = 1; ++ required AddressPort proxy_external_ip = 2; ++} ++ ++message BindReply { ++ optional AddressPort proxy_external_ip = 1; ++} ++ ++ ++ ++message GetSocketNameRequest { ++ required string socket_descriptor = 1; ++} ++ ++message GetSocketNameReply { ++ optional AddressPort proxy_external_ip = 2; ++} ++ ++ ++ ++message GetPeerNameRequest { ++ required string socket_descriptor = 1; ++} ++ ++message GetPeerNameReply { ++ optional AddressPort peer_ip = 2; ++} ++ ++ ++message SocketOption { ++ ++ enum SocketOptionLevel { ++ SOCKET_SOL_IP = 0; ++ SOCKET_SOL_SOCKET = 1; ++ SOCKET_SOL_TCP = 6; ++ SOCKET_SOL_UDP = 17; ++ } ++ ++ enum SocketOptionName { ++ option allow_alias = true; ++ ++ SOCKET_SO_DEBUG = 1; ++ SOCKET_SO_REUSEADDR = 2; ++ SOCKET_SO_TYPE = 3; ++ SOCKET_SO_ERROR = 4; ++ SOCKET_SO_DONTROUTE = 5; ++ SOCKET_SO_BROADCAST = 6; ++ SOCKET_SO_SNDBUF = 7; ++ SOCKET_SO_RCVBUF = 8; ++ SOCKET_SO_KEEPALIVE = 9; ++ SOCKET_SO_OOBINLINE = 10; ++ SOCKET_SO_LINGER = 13; ++ SOCKET_SO_RCVTIMEO = 20; ++ SOCKET_SO_SNDTIMEO = 21; ++ ++ SOCKET_IP_TOS = 1; ++ SOCKET_IP_TTL = 2; ++ SOCKET_IP_HDRINCL = 3; ++ SOCKET_IP_OPTIONS = 4; ++ ++ SOCKET_TCP_NODELAY = 1; ++ SOCKET_TCP_MAXSEG = 2; ++ SOCKET_TCP_CORK = 3; ++ SOCKET_TCP_KEEPIDLE = 4; ++ SOCKET_TCP_KEEPINTVL = 5; ++ SOCKET_TCP_KEEPCNT = 6; ++ SOCKET_TCP_SYNCNT = 7; ++ SOCKET_TCP_LINGER2 = 8; ++ SOCKET_TCP_DEFER_ACCEPT = 9; ++ SOCKET_TCP_WINDOW_CLAMP = 10; ++ SOCKET_TCP_INFO = 11; ++ SOCKET_TCP_QUICKACK = 12; ++ } ++ ++ required SocketOptionLevel level = 1; ++ required SocketOptionName option = 2; ++ required bytes value = 3; ++} ++ ++ ++message SetSocketOptionsRequest { ++ required string socket_descriptor = 1; ++ repeated SocketOption options = 2; ++} ++ ++message SetSocketOptionsReply { ++} ++ ++message GetSocketOptionsRequest { ++ required string socket_descriptor = 1; ++ repeated SocketOption options = 2; ++} ++ ++message GetSocketOptionsReply { ++ repeated SocketOption options = 2; ++} ++ ++ ++message ConnectRequest { ++ required string socket_descriptor = 1; ++ required AddressPort remote_ip = 2; ++ optional double timeout_seconds = 3 [default=-1]; ++} ++ ++message ConnectReply { ++ optional AddressPort proxy_external_ip = 1; ++ ++ extensions 1000 to max; ++} ++ ++ ++message ListenRequest { ++ required string socket_descriptor = 1; ++ required int32 backlog = 2; ++} ++ ++message ListenReply { ++} ++ ++ ++message AcceptRequest { ++ required string socket_descriptor = 1; ++ optional double timeout_seconds = 2 [default=-1]; ++} ++ ++message AcceptReply { ++ optional bytes new_socket_descriptor = 2; ++ optional AddressPort remote_address = 3; ++} ++ ++ ++ ++message ShutDownRequest { ++ enum How { ++ SOCKET_SHUT_RD = 1; ++ SOCKET_SHUT_WR = 2; ++ SOCKET_SHUT_RDWR = 3; ++ } ++ required string socket_descriptor = 1; ++ required How how = 2; ++ required int64 send_offset = 3; ++} ++ ++message ShutDownReply { ++} ++ ++ ++ ++message CloseRequest { ++ required string socket_descriptor = 1; ++ optional int64 send_offset = 2 
[default=-1]; ++} ++ ++message CloseReply { ++} ++ ++ ++ ++message SendRequest { ++ required string socket_descriptor = 1; ++ required bytes data = 2 [ctype=CORD]; ++ required int64 stream_offset = 3; ++ optional int32 flags = 4 [default=0]; ++ optional AddressPort send_to = 5; ++ optional double timeout_seconds = 6 [default=-1]; ++} ++ ++message SendReply { ++ optional int32 data_sent = 1; ++} ++ ++ ++message ReceiveRequest { ++ enum Flags { ++ MSG_OOB = 1; ++ MSG_PEEK = 2; ++ } ++ required string socket_descriptor = 1; ++ required int32 data_size = 2; ++ optional int32 flags = 3 [default=0]; ++ optional double timeout_seconds = 5 [default=-1]; ++} ++ ++message ReceiveReply { ++ optional int64 stream_offset = 2; ++ optional bytes data = 3 [ctype=CORD]; ++ optional AddressPort received_from = 4; ++ optional int32 buffer_size = 5; ++} ++ ++ ++ ++message PollEvent { ++ ++ enum PollEventFlag { ++ SOCKET_POLLNONE = 0; ++ SOCKET_POLLIN = 1; ++ SOCKET_POLLPRI = 2; ++ SOCKET_POLLOUT = 4; ++ SOCKET_POLLERR = 8; ++ SOCKET_POLLHUP = 16; ++ SOCKET_POLLNVAL = 32; ++ SOCKET_POLLRDNORM = 64; ++ SOCKET_POLLRDBAND = 128; ++ SOCKET_POLLWRNORM = 256; ++ SOCKET_POLLWRBAND = 512; ++ SOCKET_POLLMSG = 1024; ++ SOCKET_POLLREMOVE = 4096; ++ SOCKET_POLLRDHUP = 8192; ++ }; ++ ++ required string socket_descriptor = 1; ++ required int32 requested_events = 2; ++ required int32 observed_events = 3; ++} ++ ++message PollRequest { ++ repeated PollEvent events = 1; ++ optional double timeout_seconds = 2 [default=-1]; ++} ++ ++message PollReply { ++ repeated PollEvent events = 2; ++} ++ ++message ResolveRequest { ++ required string name = 1; ++ repeated CreateSocketRequest.SocketFamily address_families = 2; ++} ++ ++message ResolveReply { ++ enum ErrorCode { ++ SOCKET_EAI_ADDRFAMILY = 1; ++ SOCKET_EAI_AGAIN = 2; ++ SOCKET_EAI_BADFLAGS = 3; ++ SOCKET_EAI_FAIL = 4; ++ SOCKET_EAI_FAMILY = 5; ++ SOCKET_EAI_MEMORY = 6; ++ SOCKET_EAI_NODATA = 7; ++ SOCKET_EAI_NONAME = 8; ++ SOCKET_EAI_SERVICE = 9; ++ SOCKET_EAI_SOCKTYPE = 10; ++ SOCKET_EAI_SYSTEM = 11; ++ SOCKET_EAI_BADHINTS = 12; ++ SOCKET_EAI_PROTOCOL = 13; ++ SOCKET_EAI_OVERFLOW = 14; ++ SOCKET_EAI_MAX = 15; ++ }; ++ ++ repeated bytes packed_address = 2; ++ optional string canonical_name = 3; ++ repeated string aliases = 4; ++} +diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go +new file mode 100644 +index 00000000000..3de46df826b +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/doc.go +@@ -0,0 +1,10 @@ ++// Copyright 2012 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. ++ ++// Package socket provides outbound network sockets. ++// ++// This package is only required in the classic App Engine environment. ++// Applications running only in App Engine "flexible environment" should ++// use the standard library's net package. ++package socket +diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go +new file mode 100644 +index 00000000000..0ad50e2d36d +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/socket_classic.go +@@ -0,0 +1,290 @@ ++// Copyright 2012 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. 
++ ++// +build appengine ++ ++package socket ++ ++import ( ++ "fmt" ++ "io" ++ "net" ++ "strconv" ++ "time" ++ ++ "github.com/golang/protobuf/proto" ++ "golang.org/x/net/context" ++ "google.golang.org/appengine/internal" ++ ++ pb "google.golang.org/appengine/internal/socket" ++) ++ ++// Dial connects to the address addr on the network protocol. ++// The address format is host:port, where host may be a hostname or an IP address. ++// Known protocols are "tcp" and "udp". ++// The returned connection satisfies net.Conn, and is valid while ctx is valid; ++// if the connection is to be used after ctx becomes invalid, invoke SetContext ++// with the new context. ++func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { ++ return DialTimeout(ctx, protocol, addr, 0) ++} ++ ++var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ ++ pb.CreateSocketRequest_IPv4, ++ pb.CreateSocketRequest_IPv6, ++} ++ ++// DialTimeout is like Dial but takes a timeout. ++// The timeout includes name resolution, if required. ++func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { ++ dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. ++ if timeout > 0 { ++ var cancel context.CancelFunc ++ dialCtx, cancel = context.WithTimeout(ctx, timeout) ++ defer cancel() ++ } ++ ++ host, portStr, err := net.SplitHostPort(addr) ++ if err != nil { ++ return nil, err ++ } ++ port, err := strconv.Atoi(portStr) ++ if err != nil { ++ return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) ++ } ++ ++ var prot pb.CreateSocketRequest_SocketProtocol ++ switch protocol { ++ case "tcp": ++ prot = pb.CreateSocketRequest_TCP ++ case "udp": ++ prot = pb.CreateSocketRequest_UDP ++ default: ++ return nil, fmt.Errorf("socket: unknown protocol %q", protocol) ++ } ++ ++ packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) ++ if err != nil { ++ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) ++ } ++ if len(packedAddrs) == 0 { ++ return nil, fmt.Errorf("no addresses for %q", host) ++ } ++ ++ packedAddr := packedAddrs[0] // use first address ++ fam := pb.CreateSocketRequest_IPv4 ++ if len(packedAddr) == net.IPv6len { ++ fam = pb.CreateSocketRequest_IPv6 ++ } ++ ++ req := &pb.CreateSocketRequest{ ++ Family: fam.Enum(), ++ Protocol: prot.Enum(), ++ RemoteIp: &pb.AddressPort{ ++ Port: proto.Int32(int32(port)), ++ PackedAddress: packedAddr, ++ }, ++ } ++ if resolved { ++ req.RemoteIp.HostnameHint = &host ++ } ++ res := &pb.CreateSocketReply{} ++ if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { ++ return nil, err ++ } ++ ++ return &Conn{ ++ ctx: ctx, ++ desc: res.GetSocketDescriptor(), ++ prot: prot, ++ local: res.ProxyExternalIp, ++ remote: req.RemoteIp, ++ }, nil ++} ++ ++// LookupIP returns the given host's IP addresses. ++func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { ++ packedAddrs, _, err := resolve(ctx, ipFamilies, host) ++ if err != nil { ++ return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) ++ } ++ addrs = make([]net.IP, len(packedAddrs)) ++ for i, pa := range packedAddrs { ++ addrs[i] = net.IP(pa) ++ } ++ return addrs, nil ++} ++ ++func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { ++ // Check if it's an IP address. 
++ if ip := net.ParseIP(host); ip != nil { ++ if ip := ip.To4(); ip != nil { ++ return [][]byte{ip}, false, nil ++ } ++ return [][]byte{ip}, false, nil ++ } ++ ++ req := &pb.ResolveRequest{ ++ Name: &host, ++ AddressFamilies: fams, ++ } ++ res := &pb.ResolveReply{} ++ if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { ++ // XXX: need to map to pb.ResolveReply_ErrorCode? ++ return nil, false, err ++ } ++ return res.PackedAddress, true, nil ++} ++ ++// withDeadline is like context.WithDeadline, except it ignores the zero deadline. ++func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { ++ if deadline.IsZero() { ++ return parent, func() {} ++ } ++ return context.WithDeadline(parent, deadline) ++} ++ ++// Conn represents a socket connection. ++// It implements net.Conn. ++type Conn struct { ++ ctx context.Context ++ desc string ++ offset int64 ++ ++ prot pb.CreateSocketRequest_SocketProtocol ++ local, remote *pb.AddressPort ++ ++ readDeadline, writeDeadline time.Time // optional ++} ++ ++// SetContext sets the context that is used by this Conn. ++// It is usually used only when using a Conn that was created in a different context, ++// such as when a connection is created during a warmup request but used while ++// servicing a user request. ++func (cn *Conn) SetContext(ctx context.Context) { ++ cn.ctx = ctx ++} ++ ++func (cn *Conn) Read(b []byte) (n int, err error) { ++ const maxRead = 1 << 20 ++ if len(b) > maxRead { ++ b = b[:maxRead] ++ } ++ ++ req := &pb.ReceiveRequest{ ++ SocketDescriptor: &cn.desc, ++ DataSize: proto.Int32(int32(len(b))), ++ } ++ res := &pb.ReceiveReply{} ++ if !cn.readDeadline.IsZero() { ++ req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) ++ } ++ ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) ++ defer cancel() ++ if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { ++ return 0, err ++ } ++ if len(res.Data) == 0 { ++ return 0, io.EOF ++ } ++ if len(res.Data) > len(b) { ++ return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) ++ } ++ return copy(b, res.Data), nil ++} ++ ++func (cn *Conn) Write(b []byte) (n int, err error) { ++ const lim = 1 << 20 // max per chunk ++ ++ for n < len(b) { ++ chunk := b[n:] ++ if len(chunk) > lim { ++ chunk = chunk[:lim] ++ } ++ ++ req := &pb.SendRequest{ ++ SocketDescriptor: &cn.desc, ++ Data: chunk, ++ StreamOffset: &cn.offset, ++ } ++ res := &pb.SendReply{} ++ if !cn.writeDeadline.IsZero() { ++ req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) ++ } ++ ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) ++ defer cancel() ++ if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { ++ // assume zero bytes were sent in this RPC ++ break ++ } ++ n += int(res.GetDataSent()) ++ cn.offset += int64(res.GetDataSent()) ++ } ++ ++ return ++} ++ ++func (cn *Conn) Close() error { ++ req := &pb.CloseRequest{ ++ SocketDescriptor: &cn.desc, ++ } ++ res := &pb.CloseReply{} ++ if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { ++ return err ++ } ++ cn.desc = "CLOSED" ++ return nil ++} ++ ++func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { ++ if ap == nil { ++ return nil ++ } ++ switch prot { ++ case pb.CreateSocketRequest_TCP: ++ return &net.TCPAddr{ ++ IP: net.IP(ap.PackedAddress), ++ Port: int(*ap.Port), ++ } ++ case pb.CreateSocketRequest_UDP: ++ return 
&net.UDPAddr{ ++ IP: net.IP(ap.PackedAddress), ++ Port: int(*ap.Port), ++ } ++ } ++ panic("unknown protocol " + prot.String()) ++} ++ ++func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } ++func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } ++ ++func (cn *Conn) SetDeadline(t time.Time) error { ++ cn.readDeadline = t ++ cn.writeDeadline = t ++ return nil ++} ++ ++func (cn *Conn) SetReadDeadline(t time.Time) error { ++ cn.readDeadline = t ++ return nil ++} ++ ++func (cn *Conn) SetWriteDeadline(t time.Time) error { ++ cn.writeDeadline = t ++ return nil ++} ++ ++// KeepAlive signals that the connection is still in use. ++// It may be called to prevent the socket being closed due to inactivity. ++func (cn *Conn) KeepAlive() error { ++ req := &pb.GetSocketNameRequest{ ++ SocketDescriptor: &cn.desc, ++ } ++ res := &pb.GetSocketNameReply{} ++ return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) ++} ++ ++func init() { ++ internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) ++} +diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go +new file mode 100644 +index 00000000000..c804169a1c0 +--- /dev/null ++++ b/vendor/google.golang.org/appengine/socket/socket_vm.go +@@ -0,0 +1,64 @@ ++// Copyright 2015 Google Inc. All rights reserved. ++// Use of this source code is governed by the Apache 2.0 ++// license that can be found in the LICENSE file. ++ ++// +build !appengine ++ ++package socket ++ ++import ( ++ "net" ++ "time" ++ ++ "golang.org/x/net/context" ++) ++ ++// Dial connects to the address addr on the network protocol. ++// The address format is host:port, where host may be a hostname or an IP address. ++// Known protocols are "tcp" and "udp". ++// The returned connection satisfies net.Conn, and is valid while ctx is valid; ++// if the connection is to be used after ctx becomes invalid, invoke SetContext ++// with the new context. ++func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { ++ conn, err := net.Dial(protocol, addr) ++ if err != nil { ++ return nil, err ++ } ++ return &Conn{conn}, nil ++} ++ ++// DialTimeout is like Dial but takes a timeout. ++// The timeout includes name resolution, if required. ++func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { ++ conn, err := net.DialTimeout(protocol, addr, timeout) ++ if err != nil { ++ return nil, err ++ } ++ return &Conn{conn}, nil ++} ++ ++// LookupIP returns the given host's IP addresses. ++func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { ++ return net.LookupIP(host) ++} ++ ++// Conn represents a socket connection. ++// It implements net.Conn. ++type Conn struct { ++ net.Conn ++} ++ ++// SetContext sets the context that is used by this Conn. ++// It is usually used only when using a Conn that was created in a different context, ++// such as when a connection is created during a warmup request but used while ++// servicing a user request. ++func (cn *Conn) SetContext(ctx context.Context) { ++ // This function is not required in App Engine "flexible environment". ++} ++ ++// KeepAlive signals that the connection is still in use. ++// It may be called to prevent the socket being closed due to inactivity. ++func (cn *Conn) KeepAlive() error { ++ // This function is not required in App Engine "flexible environment". 
++ return nil ++} +diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +index dbe2e2d0c65..6ce01ac9a69 100644 +--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go ++++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +@@ -15,7 +15,7 @@ + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: + // protoc-gen-go v1.26.0 +-// protoc v3.21.9 ++// protoc v3.21.12 + // source: google/api/field_behavior.proto + + package annotations +@@ -78,6 +78,19 @@ const ( + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 ++ // Denotes that the field in a resource (a message annotated with ++ // google.api.resource) is used in the resource name to uniquely identify the ++ // resource. For AIP-compliant APIs, this should only be applied to the ++ // `name` field on the resource. ++ // ++ // This behavior should not be applied to references to other resources within ++ // the message. ++ // ++ // The identifier field of resources often have different field behavior ++ // depending on the request it is embedded in (e.g. for Create methods name ++ // is optional and unused, while for Update methods it is required). Instead ++ // of method-specific annotations, only `IDENTIFIER` is required. ++ FieldBehavior_IDENTIFIER FieldBehavior = 8 + ) + + // Enum value maps for FieldBehavior. +@@ -91,6 +104,7 @@ var ( + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", ++ 8: "IDENTIFIER", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, +@@ -101,6 +115,7 @@ var ( + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, ++ "IDENTIFIER": 8, + } + ) + +@@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, +- 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, ++ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, +@@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, +- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, ++ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, ++ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, +diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +new file mode 100644 +index 00000000000..1d3f1b5b7ef +--- /dev/null ++++ b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go +@@ -0,0 +1,23 @@ ++// Copyright 2023 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// This file, and the {{.RootMod}} import, won't actually become part of ++// the resultant binary. ++//go:build modhack ++// +build modhack ++ ++package api ++ ++// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository ++import _ "google.golang.org/genproto/internal" +diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go +new file mode 100644 +index 00000000000..90e89b4aa3f +--- /dev/null ++++ b/vendor/google.golang.org/genproto/internal/doc.go +@@ -0,0 +1,17 @@ ++// Copyright 2023 Google LLC ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// This file makes internal an importable go package ++// for use with backreferences from submodules. ++package internal +diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md +index 0e6ae69a584..ab0fbb79b86 100644 +--- a/vendor/google.golang.org/grpc/README.md ++++ b/vendor/google.golang.org/grpc/README.md +@@ -1,8 +1,8 @@ + # gRPC-Go + +-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) + [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] + [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) ++[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) + + The [Go][] implementation of [gRPC][]: A high performance, open source, general + RPC framework that puts mobile and HTTP/2 first. For more information see the +@@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. 
For more information see the + + ## Installation + +-With [Go module][] support (Go 1.11+), simply add the following import ++Simply add the following import to your code, and then `go [build|run|test]` ++will automatically fetch the necessary dependencies: ++ + + ```go + import "google.golang.org/grpc" + ``` + +-to your code, and then `go [build|run|test]` will automatically fetch the +-necessary dependencies. +- +-Otherwise, to install the `grpc-go` package, run the following command: +- +-```console +-$ go get -u google.golang.org/grpc +-``` +- + > **Note:** If you are trying to access `grpc-go` from **China**, see the + > [FAQ](#FAQ) below. + +@@ -56,15 +49,6 @@ To build Go code, there are several options: + + - Set up a VPN and access google.golang.org through that. + +-- Without Go module support: `git clone` the repo manually: +- +- ```sh +- git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc +- ``` +- +- You will need to do the same for all of grpc's dependencies in `golang.org`, +- e.g. `golang.org/x/net`. +- + - With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + +@@ -76,33 +60,13 @@ To build Go code, there are several options: + ``` + + Again, this will need to be done for all transitive dependencies hosted on +- golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). ++ golang.org as well. For details, refer to [golang/go issue ++ #28652](https://github.com/golang/go/issues/28652). + + ### Compiling error, undefined: grpc.SupportPackageIsVersion + +-#### If you are using Go modules: +- +-Ensure your gRPC-Go version is `require`d at the appropriate version in +-the same module containing the generated `.pb.go` files. For example, +-`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: +- +-```go +-module +- +-require ( +- google.golang.org/grpc v1.27.0 +-) +-``` +- +-#### If you are *not* using Go modules: +- +-Update the `proto` package, gRPC package, and rebuild the `.proto` files: +- +-```sh +-go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +-go get -u google.golang.org/grpc +-protoc --go_out=plugins=grpc:. *.proto +-``` ++Please update to the latest version of gRPC-Go using ++`go get google.golang.org/grpc`. + + ### How to turn on logging + +@@ -121,9 +85,11 @@ possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown +- 1. Keepalive parameters caused connection shutdown, for example if you have configured +- your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). +- If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), ++ 1. Keepalive parameters caused connection shutdown, for example if you have ++ configured your server to terminate connections regularly to [trigger DNS ++ lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). ++ If this is the case, you may want to increase your ++ [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. 
+ + It can be tricky to debug this because the error happens on the client side but +diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go +index 3efca459149..52d530d7ad0 100644 +--- a/vendor/google.golang.org/grpc/attributes/attributes.go ++++ b/vendor/google.golang.org/grpc/attributes/attributes.go +@@ -34,26 +34,26 @@ import ( + // key/value pairs. Keys must be hashable, and users should define their own + // types for keys. Values should not be modified after they are added to an + // Attributes or if they were received from one. If values implement 'Equal(o +-// interface{}) bool', it will be called by (*Attributes).Equal to determine +-// whether two values with the same key should be considered equal. ++// any) bool', it will be called by (*Attributes).Equal to determine whether ++// two values with the same key should be considered equal. + type Attributes struct { +- m map[interface{}]interface{} ++ m map[any]any + } + + // New returns a new Attributes containing the key/value pair. +-func New(key, value interface{}) *Attributes { +- return &Attributes{m: map[interface{}]interface{}{key: value}} ++func New(key, value any) *Attributes { ++ return &Attributes{m: map[any]any{key: value}} + } + + // WithValue returns a new Attributes containing the previous keys and values + // and the new key/value pair. If the same key appears multiple times, the + // last value overwrites all previous values for that key. To remove an + // existing key, use a nil value. value should not be modified later. +-func (a *Attributes) WithValue(key, value interface{}) *Attributes { ++func (a *Attributes) WithValue(key, value any) *Attributes { + if a == nil { + return New(key, value) + } +- n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} ++ n := &Attributes{m: make(map[any]any, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } +@@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { + + // Value returns the value associated with these attributes for key, or nil if + // no value is associated with key. The returned value should not be modified. +-func (a *Attributes) Value(key interface{}) interface{} { ++func (a *Attributes) Value(key any) any { + if a == nil { + return nil + } + return a.m[key] + } + +-// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +-// bool' is implemented for a value in the attributes, it is called to +-// determine if the value matches the one stored in the other attributes. If +-// Equal is not implemented, standard equality is used to determine if the two +-// values are equal. Note that some types (e.g. maps) aren't comparable by +-// default, so they must be wrapped in a struct, or in an alias type, with Equal +-// defined. ++// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is ++// implemented for a value in the attributes, it is called to determine if the ++// value matches the one stored in the other attributes. If Equal is not ++// implemented, standard equality is used to determine if the two values are ++// equal. Note that some types (e.g. maps) aren't comparable by default, so ++// they must be wrapped in a struct, or in an alias type, with Equal defined. 
+ func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true +@@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { + // o missing element of a + return false + } +- if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { ++ if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if !eq.Equal(ov) { + return false + } +@@ -112,19 +111,31 @@ func (a *Attributes) String() string { + sb.WriteString("{") + first := true + for k, v := range a.m { +- var key, val string +- if str, ok := k.(interface{ String() string }); ok { +- key = str.String() +- } +- if str, ok := v.(interface{ String() string }); ok { +- val = str.String() +- } + if !first { + sb.WriteString(", ") + } +- sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) ++ sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() + } ++ ++func str(x any) (s string) { ++ if v, ok := x.(fmt.Stringer); ok { ++ return fmt.Sprint(v) ++ } else if v, ok := x.(string); ok { ++ return v ++ } ++ return fmt.Sprintf("<%p>", x) ++} ++ ++// MarshalJSON helps implement the json.Marshaler interface, thereby rendering ++// the Attributes correctly when printing (via pretty.JSON) structs containing ++// Attributes as fields. ++// ++// Is it impossible to unmarshal attributes from a JSON representation and this ++// method is meant only for debugging purposes. ++func (a *Attributes) MarshalJSON() ([]byte, error) { ++ return []byte(a.String()), nil ++} +diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go +index 8f00523c0e2..d79560a2e26 100644 +--- a/vendor/google.golang.org/grpc/balancer/balancer.go ++++ b/vendor/google.golang.org/grpc/balancer/balancer.go +@@ -30,6 +30,7 @@ import ( + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +@@ -39,6 +40,8 @@ import ( + var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) ++ ++ logger = grpclog.Component("balancer") + ) + + // Register registers the balancer builder to the balancer map. b.Name +@@ -51,6 +54,12 @@ var ( + // an init() function), and is not thread-safe. If multiple Balancers are + // registered with the same name, the one registered last will take effect. + func Register(b Builder) { ++ if strings.ToLower(b.Name()) != b.Name() { ++ // TODO: Skip the use of strings.ToLower() to index the map after v1.59 ++ // is released to switch to case sensitive balancer registry. Also, ++ // remove this warning and update the docstrings for Register and Get. ++ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) ++ } + m[strings.ToLower(b.Name())] = b + } + +@@ -70,6 +79,12 @@ func init() { + // Note that the compare is done in a case-insensitive fashion. + // If no builder is register with the name, nil will be returned. + func Get(name string) Builder { ++ if strings.ToLower(name) != name { ++ // TODO: Skip the use of strings.ToLower() to index the map after v1.59 ++ // is released to switch to case sensitive balancer registry. Also, ++ // remove this warning and update the docstrings for Register and Get. ++ logger.Warningf("Balancer retrieved for name %q. 
grpc-go will be switching to case sensitive balancer registries soon", name) ++ } + if b, ok := m[strings.ToLower(name)]; ok { + return b + } +@@ -105,8 +120,8 @@ type SubConn interface { + // + // This will trigger a state transition for the SubConn. + // +- // Deprecated: This method is now part of the ClientConn interface and will +- // eventually be removed from here. ++ // Deprecated: this method will be removed. Create new SubConns for new ++ // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +@@ -115,6 +130,13 @@ type SubConn interface { + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) ++ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be ++ // allowed to complete. No future calls should be made on the SubConn. ++ // One final state update will be delivered to the StateListener (or ++ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to ++ // indicate the shutdown operation. This may be delivered before ++ // in-progress RPCs are complete and the actual connection is closed. ++ Shutdown() + } + + // NewSubConnOptions contains options to create new SubConn. +@@ -129,6 +151,11 @@ type NewSubConnOptions struct { + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool ++ // StateListener is called when the state of the subconn changes. If nil, ++ // Balancer.UpdateSubConnState will be called instead. Will never be ++ // invoked until after Connect() is called on the SubConn created with ++ // these options. ++ StateListener func(SubConnState) + } + + // State contains the balancer's state relevant to the gRPC ClientConn. +@@ -150,16 +177,24 @@ type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. ++ // ++ // Deprecated: please be aware that in a future version, SubConns will only ++ // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. ++ // ++ // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // +- // This will trigger a state transition for the SubConn. ++ // This may trigger a state transition for the SubConn. ++ // ++ // Deprecated: this method will be removed. Create new SubConns for new ++ // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has +@@ -250,7 +285,7 @@ type DoneInfo struct { + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. +- ServerLoad interface{} ++ ServerLoad any + } + + var ( +@@ -343,9 +378,13 @@ type Balancer interface { + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. 
++ // ++ // Deprecated: Use NewSubConnOptions.StateListener when creating the ++ // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) +- // Close closes the balancer. The balancer is not required to call +- // ClientConn.RemoveSubConn for its existing SubConns. ++ // Close closes the balancer. The balancer is not currently required to ++ // call SubConn.Shutdown for its existing SubConns; however, this will be ++ // required in a future release, so it is recommended. + Close() + } + +@@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") + type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the +- // associated SubConn), but is declared as interface{} to avoid a +- // dependency cycle. Should also return a close function that will be +- // called when all references to the Producer have been given up. +- Build(grpcClientConnInterface interface{}) (p Producer, close func()) ++ // associated SubConn), but is declared as `any` to avoid a dependency ++ // cycle. Should also return a close function that will be called when all ++ // references to the Producer have been given up. ++ Build(grpcClientConnInterface any) (p Producer, close func()) + } + + // A Producer is a type shared among potentially many consumers. It is + // associated with a SubConn, and an implementation will typically contain + // other methods to provide additional functionality, e.g. configuration or + // subscription registration. +-type Producer interface { +-} ++type Producer any +diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go +index 3929c26d31e..a7f1eeec8e6 100644 +--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go ++++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go +@@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). +- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) ++ var sc balancer.SubConn ++ opts := balancer.NewSubConnOptions{ ++ HealthCheckEnabled: b.config.HealthCheck, ++ StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, ++ } ++ sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue +@@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { +- b.cc.RemoveSubConn(sc) ++ sc.Shutdown() + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. +- // The entry will be deleted in UpdateSubConnState. ++ // The entry will be deleted in updateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn +@@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + } + ++// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
+ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) ++} ++ ++func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) +@@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: +- // When an address was removed by resolver, b called RemoveSubConn but +- // kept the sc's state in scStates. Remove state for this sc here. ++ // When an address was removed by resolver, b called Shutdown but kept ++ // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. +@@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su + } + + // Close is a nop because base balancer doesn't have internal state to clean up, +-// and it doesn't need to call RemoveSubConn for the SubConns. ++// and it doesn't need to call Shutdown for the SubConns. + func (b *baseBalancer) Close() { + } + +diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +index 04b9ad41169..a4411c22bfc 100644 +--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go ++++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +@@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { +- // If the addresses specified in the update contain addresses of type +- // "grpclb" and the selected LB policy is not "grpclb", these addresses +- // will be filtered out and ccs will be modified with the updated +- // address list. +- if ccb.curBalancerName != grpclbName { +- var addrs []resolver.Address +- for _, addr := range ccs.ResolverState.Addresses { +- if addr.Type == resolver.GRPCLB { +- continue +- } +- addrs = append(addrs, addr) +- } +- ccs.ResolverState.Addresses = addrs +- } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { +@@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat + func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { +- ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) ++ // Even though it is optional for balancers, gracefulswitch ensures ++ // opts.StateListener is set, so this cannot ever be nil. 
++ sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() + } +@@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + } + + ccb.mode = m +- done := ccb.serializer.Done ++ done := ccb.serializer.Done() + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent +@@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + } + ccb.mu.Unlock() + +- // Give enqueued callbacks a chance to finish. ++ // Give enqueued callbacks a chance to finish before closing the balancer. + <-done +- // Spawn a goroutine to close the balancer (since it may block trying to +- // cleanup all allocated resources) and return early. +- go b.Close() ++ b.Close() + } + + // exitIdleMode is invoked by grpc when the channel exits idle mode either +@@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } +- acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} ++ acbw := &acBalancerWrapper{ ++ ccb: ccb, ++ ac: ac, ++ producers: make(map[balancer.ProducerBuilder]*refCountedProducer), ++ stateListener: opts.StateListener, ++ } + ac.acbw = acbw + return acbw, nil + } + + func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +- if ccb.isIdleOrClosed() { +- // It it safe to ignore this call when the balancer is closed or in idle +- // because the ClientConn takes care of closing the connections. +- // +- // Not returning early from here when the balancer is closed or in idle +- // leads to a deadlock though, because of the following sequence of +- // calls when holding cc.mu: +- // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> +- // ccb.RemoveAddrConn --> cc.removeAddrConn +- return +- } +- +- acbw, ok := sc.(*acBalancerWrapper) +- if !ok { +- return +- } +- ccb.cc.removeAddrConn(acbw.ac, errConnDrain) ++ // The graceful switch balancer will never call this. ++ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") + } + + func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +@@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string { + // acBalancerWrapper is a wrapper on top of ac for balancers. + // It implements balancer.SubConn interface. + type acBalancerWrapper struct { +- ac *addrConn // read-only ++ ac *addrConn // read-only ++ ccb *ccBalancerWrapper // read-only ++ stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +@@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() + } + ++func (acbw *acBalancerWrapper) Shutdown() { ++ ccb := acbw.ccb ++ if ccb.isIdleOrClosed() { ++ // It it safe to ignore this call when the balancer is closed or in idle ++ // because the ClientConn takes care of closing the connections. 
++ // ++ // Not returning early from here when the balancer is closed or in idle ++ // leads to a deadlock though, because of the following sequence of ++ // calls when holding cc.mu: ++ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> ++ // ccb.RemoveAddrConn --> cc.removeAddrConn ++ return ++ } ++ ++ ccb.cc.removeAddrConn(acbw.ac, errConnDrain) ++} ++ + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not + // ready, blocks until it is or ctx expires. Returns an error when the context + // expires or the addrConn is shut down. +@@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, + + // Invoke performs a unary RPC. If the addrConn is not ready, returns + // errSubConnNotReady. +-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { ++func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err +diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +index ec2c2fa14dd..5954801122a 100644 +--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go ++++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +@@ -18,7 +18,7 @@ + + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: +-// protoc-gen-go v1.30.0 ++// protoc-gen-go v1.31.0 + // protoc v4.22.0 + // source: grpc/binlog/v1/binarylog.proto + +diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go +index e6a1dc5d75e..788c89c16f9 100644 +--- a/vendor/google.golang.org/grpc/call.go ++++ b/vendor/google.golang.org/grpc/call.go +@@ -26,12 +26,7 @@ import ( + // received. This is typically called by generated code. + // + // All errors returned by Invoke are compatible with the status package. +-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +- if err := cc.idlenessMgr.onCallBegin(); err != nil { +- return err +- } +- defer cc.idlenessMgr.onCallEnd() +- ++func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +@@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // received. This is typically called by generated code. + // + // DEPRECATED: Use ClientConn.Invoke instead. +-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) + } + + var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
+ if err != nil { + return err +diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go +index 95a7459b02f..429c389e473 100644 +--- a/vendor/google.golang.org/grpc/clientconn.go ++++ b/vendor/google.golang.org/grpc/clientconn.go +@@ -34,9 +34,12 @@ import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" ++ "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" ++ "google.golang.org/grpc/internal/idle" ++ "google.golang.org/grpc/internal/pretty" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" +@@ -53,8 +56,6 @@ import ( + const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +- // must match grpclbName in grpclb/grpclb.go +- grpclbName = "grpclb" + ) + + var ( +@@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires + func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, +- csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), +@@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + // Register ClientConn with channelz. + cc.channelzRegistration(target) + ++ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) ++ + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } +@@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. +- cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) ++ cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { +@@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + } + ++type idler ClientConn ++ ++func (i *idler) EnterIdleMode() error { ++ return (*ClientConn)(i).enterIdleMode() ++} ++ ++func (i *idler) ExitIdleMode() error { ++ return (*ClientConn)(i).exitIdleMode() ++} ++ + // exitIdleMode moves the channel out of idle mode by recreating the name + // resolver and load balancer. 
+ func (cc *ClientConn) exitIdleMode() error { +@@ -325,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { ++ channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() +- logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + +@@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error { + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { +- cc.blockingpicker = newPickerWrapper() ++ cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true +@@ -392,12 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { + // name resolver, load balancer and any subchannels. + func (cc *ClientConn) enterIdleMode() error { + cc.mu.Lock() ++ defer cc.mu.Unlock() ++ + if cc.conns == nil { +- cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { +- logger.Error("ClientConn asked to enter idle mode when not active") ++ channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + return nil + } + +@@ -418,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle +- cc.mu.Unlock() ++ cc.addTraceEvent("entering idle mode") + + go func() { +- cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() ++ + return nil + } + +@@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error { + func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") +- cc.csMgr.channelzID = cc.channelzID + } + + // chainUnaryClientInterceptors chains all unary client interceptors into one. +@@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { +- chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { ++ chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } +@@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final + if curr == len(interceptors)-1 { + return finalInvoker + } +- return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { ++ return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } + } +@@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr + } + } + ++// newConnectivityStateManager creates an connectivityStateManager with ++// the specified id. 
++func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { ++ return &connectivityStateManager{ ++ channelzID: id, ++ pubSub: grpcsync.NewPubSub(ctx), ++ } ++} ++ + // connectivityStateManager keeps the connectivity.State of ClientConn. + // This struct will eventually be exported so the balancers can access it. ++// ++// TODO: If possible, get rid of the `connectivityStateManager` type, and ++// provide this functionality using the `PubSub`, to avoid keeping track of ++// the connectivity state at two places. + type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID *channelz.Identifier ++ pubSub *grpcsync.PubSub + } + + // updateState updates the connectivity.State of ClientConn. +@@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { + return + } + csm.state = state ++ csm.pubSub.Publish(state) ++ + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. +@@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. +- Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error ++ Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) + } +@@ -622,7 +650,7 @@ type ClientConn struct { + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. +- idlenessMgr idlenessManager ++ idlenessMgr idle.Manager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. +@@ -668,6 +696,19 @@ const ( + ccIdlenessStateExitingIdle + ) + ++func (s ccIdlenessState) String() string { ++ switch s { ++ case ccIdlenessStateActive: ++ return "active" ++ case ccIdlenessStateIdle: ++ return "idle" ++ case ccIdlenessStateExitingIdle: ++ return "exitingIdle" ++ default: ++ return "unknown" ++ } ++} ++ + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or + // ctx expires. A true value is returned in former case and false in latter. + // +@@ -759,6 +800,16 @@ func init() { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) ++ ++ internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { ++ return cc.csMgr.pubSub.Subscribe(s) ++ } ++ internal.EnterIdleModeForTesting = func(cc *ClientConn) error { ++ return cc.enterIdleMode() ++ } ++ internal.ExitIdleModeForTesting = func(cc *ClientConn) error { ++ return cc.exitIdleMode() ++ } + } + + func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +@@ -867,6 +918,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi + cc.balancerWrapper.updateSubConnState(sc, s, err) + } + ++// Makes a copy of the input addresses slice and clears out the balancer ++// attributes field. 
Addresses are passed during subconn creation and address ++// update operations. In both cases, we will clear the balancer attributes by ++// calling this function, and therefore we will be able to use the Equal method ++// provided by the resolver.Address type for comparison. ++func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { ++ out := make([]resolver.Address, len(in)) ++ for i := range in { ++ out[i] = in[i] ++ out[i].BalancerAttributes = nil ++ } ++ return out ++} ++ + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. + // + // Caller needs to make sure len(addrs) > 0. +@@ -874,7 +939,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, +- addrs: addrs, ++ addrs: copyAddressesWithoutBalancerAttributes(addrs), + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), +@@ -995,8 +1060,9 @@ func equalAddresses(a, b []resolver.Address) bool { + // connections or connection attempts. + func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.mu.Lock() +- channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) ++ channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + ++ addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return +@@ -1031,8 +1097,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + +- // We have to defer here because GracefulClose => Close => onClose, which +- // requires locking ac.mu. ++ // We have to defer here because GracefulClose => onClose, which requires ++ // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil +@@ -1137,23 +1203,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel + } + + var newBalancerName string +- if cc.sc != nil && cc.sc.lbConfig != nil { ++ if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { ++ // No service config or no LB policy specified in config. ++ newBalancerName = PickFirstBalancerName ++ } else if cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name +- } else { +- var isGRPCLB bool +- for _, a := range addrs { +- if a.Type == resolver.GRPCLB { +- isGRPCLB = true +- break +- } +- } +- if isGRPCLB { +- newBalancerName = grpclbName +- } else if cc.sc != nil && cc.sc.LB != nil { +- newBalancerName = *cc.sc.LB +- } else { +- newBalancerName = PickFirstBalancerName +- } ++ } else { // cc.sc.LB != nil ++ newBalancerName = *cc.sc.LB + } + cc.balancerWrapper.switchTo(newBalancerName) + } +@@ -1192,7 +1248,10 @@ func (cc *ClientConn) ResetConnectBackoff() { + + // Close tears down the ClientConn and all underlying connections. 
+ func (cc *ClientConn) Close() error { +- defer cc.cancel() ++ defer func() { ++ cc.cancel() ++ <-cc.csMgr.pubSub.Done() ++ }() + + cc.mu.Lock() + if cc.conns == nil { +@@ -1226,7 +1285,7 @@ func (cc *ClientConn) Close() error { + rWrapper.close() + } + if idlenessMgr != nil { +- idlenessMgr.close() ++ idlenessMgr.Close() + } + + for ac := range conns { +@@ -1336,12 +1395,14 @@ func (ac *addrConn) resetTransport() { + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) +- // After exhausting all addresses, the addrConn enters +- // TRANSIENT_FAILURE. ++ ac.mu.Lock() + if acCtx.Err() != nil { ++ // addrConn was torn down. ++ ac.mu.Unlock() + return + } +- ac.mu.Lock() ++ // After exhausting all addresses, the addrConn enters ++ // TRANSIENT_FAILURE. + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. +@@ -1537,7 +1598,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { + + // Set up the health check helper functions. + currentTr := ac.transport +- newStream := func(method string) (interface{}, error) { ++ newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() +@@ -1625,16 +1686,7 @@ func (ac *addrConn) tearDown(err error) { + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} +- if err == errConnDrain && curTr != nil { +- // GracefulClose(...) may be executed multiple times when +- // i) receiving multiple GoAway frames from the server; or +- // ii) there are concurrent name resolver/Balancer triggered +- // address removal and GoAway. +- // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. +- ac.mu.Unlock() +- curTr.GracefulClose() +- ac.mu.Lock() +- } ++ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, +@@ -1648,6 +1700,29 @@ func (ac *addrConn) tearDown(err error) { + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() ++ ++ // We have to release the lock before the call to GracefulClose/Close here ++ // because both of them call onClose(), which requires locking ac.mu. ++ if curTr != nil { ++ if err == errConnDrain { ++ // Close the transport gracefully when the subConn is being shutdown. ++ // ++ // GracefulClose() may be executed multiple times if: ++ // - multiple GoAway frames are received from the server ++ // - there are concurrent name resolver or balancer triggered ++ // address removal and GoAway ++ curTr.GracefulClose() ++ } else { ++ // Hard close the transport when the channel is entering idle or is ++ // being shutdown. In the case where the channel is being shutdown, ++ // closing of transports is also taken care of by cancelation of cc.ctx. ++ // But in the case where the channel is entering idle, we need to ++ // explicitly close the transports here. Instead of distinguishing ++ // between these two cases, it is simpler to close the transport ++ // unconditionally here. ++ curTr.Close(err) ++ } ++ } + } + + func (ac *addrConn) getState() connectivity.State { +@@ -1807,19 +1882,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { + } + + // parseTarget uses RFC 3986 semantics to parse the given target into a +-// resolver.Target struct containing scheme, authority and url. Query +-// params are stripped from the endpoint. ++// resolver.Target struct containing url. 
Query params are stripped from the ++// endpoint. + func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + +- return resolver.Target{ +- Scheme: u.Scheme, +- Authority: u.Host, +- URL: *u, +- }, nil ++ return resolver.Target{URL: *u}, nil ++} ++ ++func encodeAuthority(authority string) string { ++ const upperhex = "0123456789ABCDEF" ++ ++ // Return for characters that must be escaped as per ++ // Valid chars are mentioned here: ++ // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 ++ shouldEscape := func(c byte) bool { ++ // Alphanum are always allowed. ++ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { ++ return false ++ } ++ switch c { ++ case '-', '_', '.', '~': // Unreserved characters ++ return false ++ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters ++ return false ++ case ':', '[', ']', '@': // Authority related delimeters ++ return false ++ } ++ // Everything else must be escaped. ++ return true ++ } ++ ++ hexCount := 0 ++ for i := 0; i < len(authority); i++ { ++ c := authority[i] ++ if shouldEscape(c) { ++ hexCount++ ++ } ++ } ++ ++ if hexCount == 0 { ++ return authority ++ } ++ ++ required := len(authority) + 2*hexCount ++ t := make([]byte, required) ++ ++ j := 0 ++ // This logic is a barebones version of escape in the go net/url library. ++ for i := 0; i < len(authority); i++ { ++ switch c := authority[i]; { ++ case shouldEscape(c): ++ t[j] = '%' ++ t[j+1] = upperhex[c>>4] ++ t[j+2] = upperhex[c&15] ++ j += 3 ++ default: ++ t[j] = authority[i] ++ j++ ++ } ++ } ++ return string(t) + } + + // Determine channel authority. The order of precedence is as follows: +@@ -1872,7 +1998,11 @@ func (cc *ClientConn) determineAuthority() error { + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. +- cc.authority = endpoint ++ // Escape the endpoint to handle use cases where the endpoint ++ // might not be a valid authority by default. ++ // For example an endpoint which has multiple paths like ++ // 'a/b/c', which is not a valid authority by default. ++ cc.authority = encodeAuthority(endpoint) + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go +index 12977654781..411e3dfd47c 100644 +--- a/vendor/google.golang.org/grpc/codec.go ++++ b/vendor/google.golang.org/grpc/codec.go +@@ -27,8 +27,8 @@ import ( + // omits the name/string, which vary between the two and are not needed for + // anything besides the registry in the encoding package. + type baseCodec interface { +- Marshal(v interface{}) ([]byte, error) +- Unmarshal(data []byte, v interface{}) error ++ Marshal(v any) ([]byte, error) ++ Unmarshal(data []byte, v any) error + } + + var _ baseCodec = Codec(nil) +@@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) + // Deprecated: use encoding.Codec instead. + type Codec interface { + // Marshal returns the wire format of v. +- Marshal(v interface{}) ([]byte, error) ++ Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. +- Unmarshal(data []byte, v interface{}) error ++ Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. 
+ String() string +diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go +index 15a3d5102a9..cfc9fd85e8d 100644 +--- a/vendor/google.golang.org/grpc/dialoptions.go ++++ b/vendor/google.golang.org/grpc/dialoptions.go +@@ -78,6 +78,7 @@ type dialOptions struct { + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration ++ recvBufferPool SharedBufferPool + } + + // DialOption configures how we set up the connection. +@@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} + } + ++// WithSharedWriteBuffer allows reusing per-connection transport write buffer. ++// If this option is set to true every connection will release the buffer after ++// flushing the data on the wire. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func WithSharedWriteBuffer(val bool) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.copts.SharedWriteBuffer = val ++ }) ++} ++ + // WithWriteBufferSize determines how much data can be batched before doing a + // write on the wire. The corresponding memory allocation for this buffer will + // be twice the size to keep syscalls low. The default value for this buffer is +@@ -628,6 +643,8 @@ func defaultDialOptions() dialOptions { + ReadBufferSize: defaultReadBufSize, + UseProxy: true, + }, ++ recvBufferPool: nopBufferPool{}, ++ idleTimeout: 30 * time.Minute, + } + } + +@@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { + // channel will exit idle mode when the Connect() method is called or when an + // RPC is initiated. + // +-// By default this feature is disabled, which can also be explicitly configured +-// by passing zero to this function. ++// A default timeout of 30 minutes will be used if this dial option is not set ++// at dial time and idleness can be disabled by passing a timeout of zero. + // + // # Experimental + // +@@ -676,3 +693,24 @@ func WithIdleTimeout(d time.Duration) DialOption { + o.idleTimeout = d + }) + } ++ ++// WithRecvBufferPool returns a DialOption that configures the ClientConn ++// to use the provided shared buffer pool for parsing incoming messages. Depending ++// on the application's workload, this could result in reduced memory allocation. ++// ++// If you are unsure about how to implement a memory pool but want to utilize one, ++// begin with grpc.NewSharedBufferPool. ++// ++// Note: The shared buffer pool feature will not be active if any of the following ++// options are used: WithStatsHandler, EnableTracing, or binary logging. In such ++// cases, the shared buffer pool will be ignored. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { ++ return newFuncDialOption(func(o *dialOptions) { ++ o.recvBufferPool = bufferPool ++ }) ++} +diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go +index 07a5861352a..5ebf88d7147 100644 +--- a/vendor/google.golang.org/grpc/encoding/encoding.go ++++ b/vendor/google.golang.org/grpc/encoding/encoding.go +@@ -38,6 +38,10 @@ const Identity = "identity" + + // Compressor is used for compressing and decompressing when sending or + // receiving messages. 
++// ++// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, ++// gRPC will invoke it to determine the size of the buffer allocated for the ++// result of decompression. A return value of -1 indicates unknown size. + type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned +@@ -51,15 +55,6 @@ type Compressor interface { + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +- // If a Compressor implements +- // DecompressedSize(compressedBytes []byte) int, gRPC will call it +- // to determine the size of the buffer allocated for the result of decompression. +- // Return -1 to indicate unknown size. +- // +- // Experimental +- // +- // Notice: This API is EXPERIMENTAL and may be changed or removed in a +- // later release. + } + + var registeredCompressor = make(map[string]Compressor) +@@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { + // methods can be called from concurrent goroutines. + type Codec interface { + // Marshal returns the wire format of v. +- Marshal(v interface{}) ([]byte, error) ++ Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. +- Unmarshal(data []byte, v interface{}) error ++ Unmarshal(data []byte, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. +diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +index a3bb173c24a..6306e8bb0f0 100644 +--- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go ++++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +@@ -40,7 +40,7 @@ const Name = "gzip" + + func init() { + c := &compressor{} +- c.poolCompressor.New = func() interface{} { ++ c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +@@ -61,7 +61,7 @@ func SetLevel(level int) error { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) +- c.poolCompressor.New = func() interface{} { ++ c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) +diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go +index 3009b35afe7..0ee3d3bae97 100644 +--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go ++++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go +@@ -37,7 +37,7 @@ func init() { + // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
+ type codec struct{} + +-func (codec) Marshal(v interface{}) ([]byte, error) { ++func (codec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) +@@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { + return proto.Marshal(vv) + } + +-func (codec) Unmarshal(data []byte, v interface{}) error { ++func (codec) Unmarshal(data []byte, v any) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) +diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go +index 8358dd6e2ab..ac73c9ced25 100644 +--- a/vendor/google.golang.org/grpc/grpclog/component.go ++++ b/vendor/google.golang.org/grpc/grpclog/component.go +@@ -31,71 +31,71 @@ type componentData struct { + + var cache = map[string]*componentData{} + +-func (c *componentData) InfoDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) InfoDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) + } + +-func (c *componentData) WarningDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) WarningDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) + } + +-func (c *componentData) ErrorDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) ErrorDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) + } + +-func (c *componentData) FatalDepth(depth int, args ...interface{}) { +- args = append([]interface{}{"[" + string(c.name) + "]"}, args...) ++func (c *componentData) FatalDepth(depth int, args ...any) { ++ args = append([]any{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) + } + +-func (c *componentData) Info(args ...interface{}) { ++func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) + } + +-func (c *componentData) Warning(args ...interface{}) { ++func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) + } + +-func (c *componentData) Error(args ...interface{}) { ++func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) + } + +-func (c *componentData) Fatal(args ...interface{}) { ++func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) 
+ } + +-func (c *componentData) Infof(format string, args ...interface{}) { ++func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Warningf(format string, args ...interface{}) { ++func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Errorf(format string, args ...interface{}) { ++func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Fatalf(format string, args ...interface{}) { ++func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) + } + +-func (c *componentData) Infoln(args ...interface{}) { ++func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) + } + +-func (c *componentData) Warningln(args ...interface{}) { ++func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) + } + +-func (c *componentData) Errorln(args ...interface{}) { ++func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) + } + +-func (c *componentData) Fatalln(args ...interface{}) { ++func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) + } + +diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go +index c8bb2be34bf..16928c9cb99 100644 +--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go ++++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go +@@ -42,53 +42,53 @@ func V(l int) bool { + } + + // Info logs to the INFO log. +-func Info(args ...interface{}) { ++func Info(args ...any) { + grpclog.Logger.Info(args...) + } + + // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +-func Infof(format string, args ...interface{}) { ++func Infof(format string, args ...any) { + grpclog.Logger.Infof(format, args...) + } + + // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +-func Infoln(args ...interface{}) { ++func Infoln(args ...any) { + grpclog.Logger.Infoln(args...) + } + + // Warning logs to the WARNING log. +-func Warning(args ...interface{}) { ++func Warning(args ...any) { + grpclog.Logger.Warning(args...) + } + + // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +-func Warningf(format string, args ...interface{}) { ++func Warningf(format string, args ...any) { + grpclog.Logger.Warningf(format, args...) + } + + // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +-func Warningln(args ...interface{}) { ++func Warningln(args ...any) { + grpclog.Logger.Warningln(args...) + } + + // Error logs to the ERROR log. +-func Error(args ...interface{}) { ++func Error(args ...any) { + grpclog.Logger.Error(args...) + } + + // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +-func Errorf(format string, args ...interface{}) { ++func Errorf(format string, args ...any) { + grpclog.Logger.Errorf(format, args...) + } + + // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +-func Errorln(args ...interface{}) { ++func Errorln(args ...any) { + grpclog.Logger.Errorln(args...) + } + + // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. + // It calls os.Exit() with exit code 1. +-func Fatal(args ...interface{}) { ++func Fatal(args ...any) { + grpclog.Logger.Fatal(args...) 
+ // Make sure fatal logs will exit. + os.Exit(1) +@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { + + // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. + // It calls os.Exit() with exit code 1. +-func Fatalf(format string, args ...interface{}) { ++func Fatalf(format string, args ...any) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { + + // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. + // It calle os.Exit()) with exit code 1. +-func Fatalln(args ...interface{}) { ++func Fatalln(args ...any) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { + // Print prints to the logger. Arguments are handled in the manner of fmt.Print. + // + // Deprecated: use Info. +-func Print(args ...interface{}) { ++func Print(args ...any) { + grpclog.Logger.Info(args...) + } + + // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. + // + // Deprecated: use Infof. +-func Printf(format string, args ...interface{}) { ++func Printf(format string, args ...any) { + grpclog.Logger.Infof(format, args...) + } + + // Println prints to the logger. Arguments are handled in the manner of fmt.Println. + // + // Deprecated: use Infoln. +-func Println(args ...interface{}) { ++func Println(args ...any) { + grpclog.Logger.Infoln(args...) + } +diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go +index ef06a4822b7..b1674d8267c 100644 +--- a/vendor/google.golang.org/grpc/grpclog/logger.go ++++ b/vendor/google.golang.org/grpc/grpclog/logger.go +@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" + // + // Deprecated: use LoggerV2. + type Logger interface { +- Fatal(args ...interface{}) +- Fatalf(format string, args ...interface{}) +- Fatalln(args ...interface{}) +- Print(args ...interface{}) +- Printf(format string, args ...interface{}) +- Println(args ...interface{}) ++ Fatal(args ...any) ++ Fatalf(format string, args ...any) ++ Fatalln(args ...any) ++ Print(args ...any) ++ Printf(format string, args ...any) ++ Println(args ...any) + } + + // SetLogger sets the logger that is used in grpc. Call only from +@@ -45,39 +45,39 @@ type loggerWrapper struct { + Logger + } + +-func (g *loggerWrapper) Info(args ...interface{}) { ++func (g *loggerWrapper) Info(args ...any) { + g.Logger.Print(args...) + } + +-func (g *loggerWrapper) Infoln(args ...interface{}) { ++func (g *loggerWrapper) Infoln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Infof(format string, args ...interface{}) { ++func (g *loggerWrapper) Infof(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +-func (g *loggerWrapper) Warning(args ...interface{}) { ++func (g *loggerWrapper) Warning(args ...any) { + g.Logger.Print(args...) + } + +-func (g *loggerWrapper) Warningln(args ...interface{}) { ++func (g *loggerWrapper) Warningln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Warningf(format string, args ...interface{}) { ++func (g *loggerWrapper) Warningf(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +-func (g *loggerWrapper) Error(args ...interface{}) { ++func (g *loggerWrapper) Error(args ...any) { + g.Logger.Print(args...) 
+ } + +-func (g *loggerWrapper) Errorln(args ...interface{}) { ++func (g *loggerWrapper) Errorln(args ...any) { + g.Logger.Println(args...) + } + +-func (g *loggerWrapper) Errorf(format string, args ...interface{}) { ++func (g *loggerWrapper) Errorf(format string, args ...any) { + g.Logger.Printf(format, args...) + } + +diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +index 5de66e40d36..ecfd36d7130 100644 +--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go ++++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go +@@ -33,35 +33,35 @@ import ( + // LoggerV2 does underlying logging work for grpclog. + type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +- Info(args ...interface{}) ++ Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +- Infoln(args ...interface{}) ++ Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +- Infof(format string, args ...interface{}) ++ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +- Warning(args ...interface{}) ++ Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +- Warningln(args ...interface{}) ++ Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +- Warningf(format string, args ...interface{}) ++ Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +- Error(args ...interface{}) ++ Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +- Errorln(args ...interface{}) ++ Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +- Errorf(format string, args ...interface{}) ++ Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatal(args ...interface{}) ++ Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalln(args ...interface{}) ++ Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalf(format string, args ...interface{}) ++ Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. 
+ V(l int) bool + } +@@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { + g.m[severity].Output(2, string(b)) + } + +-func (g *loggerT) Info(args ...interface{}) { ++func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Infoln(args ...interface{}) { ++func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Infof(format string, args ...interface{}) { ++func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Warning(args ...interface{}) { ++func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Warningln(args ...interface{}) { ++func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Warningf(format string, args ...interface{}) { ++func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Error(args ...interface{}) { ++func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) + } + +-func (g *loggerT) Errorln(args ...interface{}) { ++func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) + } + +-func (g *loggerT) Errorf(format string, args ...interface{}) { ++func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) + } + +-func (g *loggerT) Fatal(args ...interface{}) { ++func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) + } + +-func (g *loggerT) Fatalln(args ...interface{}) { ++func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) + } + +-func (g *loggerT) Fatalf(format string, args ...interface{}) { ++func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) + } +@@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { + type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. +- InfoDepth(depth int, args ...interface{}) ++ InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +- WarningDepth(depth int, args ...interface{}) ++ WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +- ErrorDepth(depth int, args ...interface{}) ++ ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
+- FatalDepth(depth int, args ...interface{}) ++ FatalDepth(depth int, args ...any) + } +diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go +index b5bee483802..740745c45f6 100644 +--- a/vendor/google.golang.org/grpc/health/client.go ++++ b/vendor/google.golang.org/grpc/health/client.go +@@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" + + // This function implements the protocol defined at: + // https://github.com/grpc/grpc/blob/master/doc/health-checking.md +-func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { ++func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + + retryConnection: +diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +index 142d35f753e..24299efd63f 100644 +--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go ++++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +@@ -17,7 +17,7 @@ + + // Code generated by protoc-gen-go. DO NOT EDIT. + // versions: +-// protoc-gen-go v1.30.0 ++// protoc-gen-go v1.31.0 + // protoc v4.22.0 + // source: grpc/health/v1/health.proto + +diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +index a01a1b4d54b..4439cda0f3c 100644 +--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go ++++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +@@ -44,8 +44,15 @@ const ( + // + // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. + type HealthClient interface { +- // If the requested service is unknown, the call will fail with status +- // NOT_FOUND. ++ // Check gets the health of the specified service. If the requested service ++ // is unknown, the call will fail with status NOT_FOUND. If the caller does ++ // not specify a service name, the server should respond with its overall ++ // health status. ++ // ++ // Clients should set a deadline when calling Check, and can declare the ++ // server unhealthy if they do not receive a timely response. ++ // ++ // Check implementations should be idempotent and side effect free. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current +@@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + // All implementations should embed UnimplementedHealthServer + // for forward compatibility + type HealthServer interface { +- // If the requested service is unknown, the call will fail with status +- // NOT_FOUND. ++ // Check gets the health of the specified service. If the requested service ++ // is unknown, the call will fail with status NOT_FOUND. If the caller does ++ // not specify a service name, the server should respond with its overall ++ // health status. ++ // ++ // Clients should set a deadline when calling Check, and can declare the ++ // server unhealthy if they do not receive a timely response. 
++ // ++ // Check implementations should be idempotent and side effect free. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current +diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go +index bb96ef57be8..877d78fc3d0 100644 +--- a/vendor/google.golang.org/grpc/interceptor.go ++++ b/vendor/google.golang.org/grpc/interceptor.go +@@ -23,7 +23,7 @@ import ( + ) + + // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error ++type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error + + // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. + // Unary interceptors can be specified as a DialOption, using +@@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ + // defaults from the ClientConn as well as per-call options. + // + // The returned error must be compatible with the status package. +-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error ++type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + + // Streamer is called by StreamClientInterceptor to create a ClientStream. + type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) +@@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli + // server side. All per-rpc information may be mutated by the interceptor. + type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. +- Server interface{} ++ Server any + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + } +@@ -78,13 +78,13 @@ type UnaryServerInfo struct { + // status package, or be one of the context errors. Otherwise, gRPC will use + // codes.Unknown as the status code and err.Error() as the status message of the + // RPC. +-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) ++type UnaryHandler func(ctx context.Context, req any) (any, error) + + // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info + // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper + // of the service method implementation. It is the responsibility of the interceptor to invoke handler + // to complete the RPC. +-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) ++type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) + + // StreamServerInfo consists of various information about a streaming RPC on + // server side. All per-rpc information may be mutated by the interceptor. +@@ -101,4 +101,4 @@ type StreamServerInfo struct { + // info contains all the information of this RPC the interceptor can operate on. 
And handler is the + // service method implementation. It is the responsibility of the interceptor to invoke handler to + // complete the RPC. +-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error ++type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go +index 5fc0ee3da53..fed1c011a32 100644 +--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go ++++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go +@@ -23,6 +23,8 @@ + package backoff + + import ( ++ "context" ++ "errors" + "time" + + grpcbackoff "google.golang.org/grpc/backoff" +@@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { + } + return time.Duration(backoff) + } ++ ++// ErrResetBackoff is the error to be returned by the function executed by RunF, ++// to instruct the latter to reset its backoff state. ++var ErrResetBackoff = errors.New("reset backoff state") ++ ++// RunF provides a convenient way to run a function f repeatedly until the ++// context expires or f returns a non-nil error that is not ErrResetBackoff. ++// When f returns ErrResetBackoff, RunF continues to run f, but resets its ++// backoff state before doing so. backoff accepts an integer representing the ++// number of retries, and returns the amount of time to backoff. ++func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { ++ attempt := 0 ++ timer := time.NewTimer(0) ++ for ctx.Err() == nil { ++ select { ++ case <-timer.C: ++ case <-ctx.Done(): ++ timer.Stop() ++ return ++ } ++ ++ err := f() ++ if errors.Is(err, ErrResetBackoff) { ++ timer.Reset(0) ++ attempt = 0 ++ continue ++ } ++ if err != nil { ++ return ++ } ++ timer.Reset(backoff(attempt)) ++ attempt++ ++ } ++} +diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +index 08666f62a7c..3c594e6e4e5 100644 +--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go ++++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +@@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { + } + } + +-// UpdateSubConnState forwards the update to the appropriate child. +-func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++// updateSubConnState forwards the update to the appropriate child. ++func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() +@@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } +- gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. 
++ gsb.mu.Unlock() + return + } +- balToUpdate.UpdateSubConnState(sc, state) ++ if state.ConnectivityState == connectivity.Shutdown { ++ delete(balToUpdate.subconns, sc) ++ } ++ gsb.mu.Unlock() ++ if cb != nil { ++ cb(state) ++ } else { ++ balToUpdate.UpdateSubConnState(sc, state) ++ } ++} ++ ++// UpdateSubConnState forwards the update to the appropriate child. ++func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { ++ gsb.updateSubConnState(sc, state, nil) + } + + // Close closes any active child balancers. +@@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { + // + // It implements the balancer.ClientConn interface and is passed down in that + // capacity to the wrapped balancer. It maintains a set of subConns created by +-// the wrapped balancer and calls from the latter to create/update/remove ++// the wrapped balancer and calls from the latter to create/update/shutdown + // SubConns update this set before being forwarded to the parent ClientConn. + // State updates from the wrapped balancer can result in invocation of the + // graceful switch logic. +@@ -254,21 +267,10 @@ type balancerWrapper struct { + subconns map[balancer.SubConn]bool // subconns created by this balancer + } + +-func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +- if state.ConnectivityState == connectivity.Shutdown { +- bw.gsb.mu.Lock() +- delete(bw.subconns, sc) +- bw.gsb.mu.Unlock() +- } +- // There is no need to protect this read with a mutex, as the write to the +- // Balancer field happens in SwitchTo, which completes before this can be +- // called. +- bw.Balancer.UpdateSubConnState(sc, state) +-} +- +-// Close closes the underlying LB policy and removes the subconns it created. bw +-// must not be referenced via balancerCurrent or balancerPending in gsb when +-// called. gsb.mu must not be held. Does not panic with a nil receiver. ++// Close closes the underlying LB policy and shuts down the subconns it ++// created. bw must not be referenced via balancerCurrent or balancerPending in ++// gsb when called. gsb.mu must not be held. Does not panic with a nil ++// receiver. + func (bw *balancerWrapper) Close() { + // before Close is called. 
+ if bw == nil { +@@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { +- bw.gsb.cc.RemoveSubConn(sc) ++ sc.Shutdown() + } + bw.gsb.mu.Unlock() + } +@@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne + } + bw.gsb.mu.Unlock() + ++ var sc balancer.SubConn ++ oldListener := opts.StateListener ++ opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call +- bw.gsb.cc.RemoveSubConn(sc) ++ sc.Shutdown() + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } +@@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + } + + func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { +- bw.gsb.mu.Lock() +- if !bw.gsb.balancerCurrentOrPending(bw) { +- bw.gsb.mu.Unlock() +- return +- } +- bw.gsb.mu.Unlock() +- bw.gsb.cc.RemoveSubConn(sc) ++ // Note: existing third party balancers may call this, so it must remain ++ // until RemoveSubConn is fully removed. ++ sc.Shutdown() + } + + func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { +diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go +index 3a905d96657..94a08d6875a 100644 +--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go ++++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go +@@ -25,7 +25,7 @@ import ( + // Parser converts loads from metadata into a concrete type. + type Parser interface { + // Parse parses loads from metadata. +- Parse(md metadata.MD) interface{} ++ Parse(md metadata.MD) any + } + + var parser Parser +@@ -38,7 +38,7 @@ func SetParser(lr Parser) { + } + + // Parse calls parser.Read(). +-func Parse(md metadata.MD) interface{} { ++func Parse(md metadata.MD) any { + if parser == nil { + return nil + } +diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +index 6c3f632215f..0f31274a3cc 100644 +--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go ++++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +@@ -230,7 +230,7 @@ type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. +- Message interface{} ++ Message any + } + + func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { +@@ -270,7 +270,7 @@ type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. +- Message interface{} ++ Message any + } + + func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { +diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +index 81c2f5fd761..4399c3df495 100644 +--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go ++++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +@@ -28,25 +28,25 @@ import "sync" + // the underlying mutex used for synchronization. 
+ // + // Unbounded supports values of any type to be stored in it by using a channel +-// of `interface{}`. This means that a call to Put() incurs an extra memory +-// allocation, and also that users need a type assertion while reading. For +-// performance critical code paths, using Unbounded is strongly discouraged and +-// defining a new type specific implementation of this buffer is preferred. See ++// of `any`. This means that a call to Put() incurs an extra memory allocation, ++// and also that users need a type assertion while reading. For performance ++// critical code paths, using Unbounded is strongly discouraged and defining a ++// new type specific implementation of this buffer is preferred. See + // internal/transport/transport.go for an example of this. + type Unbounded struct { +- c chan interface{} ++ c chan any + closed bool + mu sync.Mutex +- backlog []interface{} ++ backlog []any + } + + // NewUnbounded returns a new instance of Unbounded. + func NewUnbounded() *Unbounded { +- return &Unbounded{c: make(chan interface{}, 1)} ++ return &Unbounded{c: make(chan any, 1)} + } + + // Put adds t to the unbounded buffer. +-func (b *Unbounded) Put(t interface{}) { ++func (b *Unbounded) Put(t any) { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { +@@ -89,7 +89,7 @@ func (b *Unbounded) Load() { + // + // If the unbounded buffer is closed, the read channel returned by this method + // is closed. +-func (b *Unbounded) Get() <-chan interface{} { ++func (b *Unbounded) Get() <-chan any { + return b.c + } + +diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +index 777cbcd7921..5395e77529c 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go +@@ -24,9 +24,7 @@ + package channelz + + import ( +- "context" + "errors" +- "fmt" + "sort" + "sync" + "sync/atomic" +@@ -40,8 +38,11 @@ const ( + ) + + var ( +- db dbWrapper +- idGen idGenerator ++ // IDGen is the global channelz entity ID generator. It should not be used ++ // outside this package except by tests. ++ IDGen IDGenerator ++ ++ db dbWrapper + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 +@@ -52,14 +53,14 @@ var ( + func TurnOn() { + if !IsOn() { + db.set(newChannelMap()) +- idGen.reset() ++ IDGen.Reset() + atomic.StoreInt32(&curState, 1) + } + } + + // IsOn returns whether channelz data collection is on. + func IsOn() bool { +- return atomic.CompareAndSwapInt32(&curState, 1, 1) ++ return atomic.LoadInt32(&curState) == 1 + } + + // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +@@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { + return d.DB + } + +-// NewChannelzStorageForTesting initializes channelz data storage and id +-// generator for testing purposes. +-// +-// Returns a cleanup function to be invoked by the test, which waits for up to +-// 10s for all channelz state to be reset by the grpc goroutines when those +-// entities get closed. This cleanup function helps with ensuring that tests +-// don't mess up each other. 
+-func NewChannelzStorageForTesting() (cleanup func() error) { +- db.set(newChannelMap()) +- idGen.reset() +- +- return func() error { +- cm := db.get() +- if cm == nil { +- return nil +- } +- +- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +- defer cancel() +- ticker := time.NewTicker(10 * time.Millisecond) +- defer ticker.Stop() +- for { +- cm.mu.RLock() +- topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) +- cm.mu.RUnlock() +- +- if err := ctx.Err(); err != nil { +- return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) +- } +- if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { +- return nil +- } +- <-ticker.C +- } +- } +-} +- + // GetTopChannels returns a slice of top channel's ChannelMetric, along with a + // boolean indicating whether there's more top channels to be queried for. + // +@@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { + // + // If channelz is not turned ON, the channelz database is not mutated. + func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +- id := idGen.genID() ++ id := IDGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { +@@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } +@@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er + // + // If channelz is not turned ON, the channelz database is not mutated. + func RegisterServer(s Server, ref string) *Identifier { +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } +@@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } +@@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") + } +- id := idGen.genID() ++ id := IDGen.genID() + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } +@@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { + return sm + } + +-type idGenerator struct { ++// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. ++type IDGenerator struct { + id int64 + } + +-func (i *idGenerator) reset() { ++// Reset resets the generated ID back to zero. Should only be used at ++// initialization or by tests sensitive to the ID number. 
++func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) + } + +-func (i *idGenerator) genID() int64 { ++func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) + } +diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go +index 8e13a3d2ce7..f89e6f77bbd 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go +@@ -31,7 +31,7 @@ func withParens(id *Identifier) string { + } + + // Info logs and adds a trace event if channelz is on. +-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, +@@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Infof logs and adds a trace event if channelz is on. +-func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, +@@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter + } + + // Warning logs and adds a trace event if channelz is on. +-func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, +@@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Warningf logs and adds a trace event if channelz is on. +-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, +@@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in + } + + // Error logs and adds a trace event if channelz is on. +-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { ++func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, +@@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + } + + // Errorf logs and adds a trace event if channelz is on. 
+-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { ++func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, +diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go +index 7b2f350e2e6..1d4020f5379 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/types.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/types.go +@@ -628,6 +628,7 @@ type tracedChannel interface { + + type channelTrace struct { + cm *channelMap ++ clearCalled bool + createdTime time.Time + eventCount int64 + mu sync.Mutex +@@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { + } + + func (c *channelTrace) clear() { ++ if c.clearCalled { ++ return ++ } ++ c.clearCalled = true + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +index 8d194e44e1d..98288c3f866 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +@@ -23,7 +23,7 @@ import ( + ) + + // GetSocketOption gets the socket option info of the conn. +-func GetSocketOption(socket interface{}) *SocketOptionData { ++func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil +diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +index 837ddc40240..b5568b22e20 100644 +--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go ++++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +@@ -22,6 +22,6 @@ + package channelz + + // GetSocketOption gets the socket option info of the conn. +-func GetSocketOption(c interface{}) *SocketOptionData { ++func GetSocketOption(c any) *SocketOptionData { + return nil + } +diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go +index 32c9b59033c..9deee7f6513 100644 +--- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go ++++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go +@@ -25,12 +25,12 @@ import ( + type requestInfoKey struct{} + + // NewRequestInfoContext creates a context with ri. +-func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { ++func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) + } + + // RequestInfoFromContext extracts the RequestInfo from ctx. +-func RequestInfoFromContext(ctx context.Context) interface{} { ++func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) + } + +@@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { + type clientHandshakeInfoKey struct{} + + // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { ++func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) + } + + // NewClientHandshakeInfoContext creates a context with chi. 
+-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { ++func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) + } +diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +index 80fd5c7d2a4..3cf10ddfbd4 100644 +--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go ++++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +@@ -37,9 +37,15 @@ var ( + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the +- // pick_first LB policy, which can be enabled by setting the environment +- // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". +- PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ++ // pick_first LB policy. ++ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) ++ // LeastRequestLB is set if we should support the least_request_experimental ++ // LB policy, which can be enabled by setting the environment variable ++ // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". ++ LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) ++ // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS ++ // handshakes that can be performed. ++ ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + ) + + func boolFromEnv(envVar string, def bool) bool { +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +index b68e26a3649..bfc45102ab2 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +@@ -30,7 +30,7 @@ var Logger LoggerV2 + var DepthLogger DepthLoggerV2 + + // InfoDepth logs to the INFO log at the specified depth. +-func InfoDepth(depth int, args ...interface{}) { ++func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { +@@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { + } + + // WarningDepth logs to the WARNING log at the specified depth. +-func WarningDepth(depth int, args ...interface{}) { ++func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { +@@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { + } + + // ErrorDepth logs to the ERROR log at the specified depth. +-func ErrorDepth(depth int, args ...interface{}) { ++func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { +@@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { + } + + // FatalDepth logs to the FATAL log at the specified depth. +-func FatalDepth(depth int, args ...interface{}) { ++func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { +@@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { + // is defined here to avoid a circular dependency. + type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. 
+- Info(args ...interface{}) ++ Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +- Infoln(args ...interface{}) ++ Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +- Infof(format string, args ...interface{}) ++ Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +- Warning(args ...interface{}) ++ Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +- Warningln(args ...interface{}) ++ Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +- Warningf(format string, args ...interface{}) ++ Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +- Error(args ...interface{}) ++ Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +- Errorln(args ...interface{}) ++ Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +- Errorf(format string, args ...interface{}) ++ Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatal(args ...interface{}) ++ Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalln(args ...interface{}) ++ Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. +- Fatalf(format string, args ...interface{}) ++ Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool + } +@@ -116,11 +116,11 @@ type LoggerV2 interface { + // later release. + type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. +- InfoDepth(depth int, args ...interface{}) ++ InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. +- WarningDepth(depth int, args ...interface{}) ++ WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. +- ErrorDepth(depth int, args ...interface{}) ++ ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. +- FatalDepth(depth int, args ...interface{}) ++ FatalDepth(depth int, args ...any) + } +diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +index 02224b42ca8..faa998de763 100644 +--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go ++++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +@@ -31,7 +31,7 @@ type PrefixLogger struct { + } + + // Infof does info logging. 
+-func (pl *PrefixLogger) Infof(format string, args ...interface{}) { ++func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format +@@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + } + + // Warningf does warning logging. +-func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) +@@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + } + + // Errorf does error logging. +-func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) +@@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + } + + // Debugf does info logging at verbose level 2. +-func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { ++func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. +diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +index d08e3e90766..aa97273e7d1 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go ++++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +@@ -80,6 +80,13 @@ func Uint32() uint32 { + return r.Uint32() + } + ++// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. ++func ExpFloat64() float64 { ++ mu.Lock() ++ defer mu.Unlock() ++ return r.ExpFloat64() ++} ++ + // Shuffle implements rand.Shuffle on the grpcrand global source. + var Shuffle = func(n int, f func(int, int)) { + mu.Lock() +diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +index 37b8d4117e7..900917dbe6c 100644 +--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +@@ -32,10 +32,10 @@ import ( + // + // This type is safe for concurrent access. + type CallbackSerializer struct { +- // Done is closed once the serializer is shut down completely, i.e all ++ // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. +- Done chan struct{} ++ done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex +@@ -48,12 +48,12 @@ type CallbackSerializer struct { + // callbacks will be added once this context is canceled, and any pending un-run + // callbacks will be executed before the serializer is shut down. + func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { +- t := &CallbackSerializer{ +- Done: make(chan struct{}), ++ cs := &CallbackSerializer{ ++ done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } +- go t.run(ctx) +- return t ++ go cs.run(ctx) ++ return cs + } + + // Schedule adds a callback to be scheduled after existing callbacks are run. 
+@@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + // Return value indicates if the callback was successfully added to the list of + // callbacks to be executed by the serializer. It is not possible to add + // callbacks once the context passed to NewCallbackSerializer is cancelled. +-func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { +- t.closedMu.Lock() +- defer t.closedMu.Unlock() ++func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { ++ cs.closedMu.Lock() ++ defer cs.closedMu.Unlock() + +- if t.closed { ++ if cs.closed { + return false + } +- t.callbacks.Put(f) ++ cs.callbacks.Put(f) + return true + } + +-func (t *CallbackSerializer) run(ctx context.Context) { ++func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + +- defer close(t.Done) ++ defer close(cs.done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. +- case callback, ok := <-t.callbacks.Get(): ++ case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } +- t.callbacks.Load() ++ cs.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from +- // this method and closing t.Done. +- t.closedMu.Lock() +- t.closed = true +- backlog = t.fetchPendingCallbacks() +- t.callbacks.Close() +- t.closedMu.Unlock() ++ // this method and closing cs.done. ++ cs.closedMu.Lock() ++ cs.closed = true ++ backlog = cs.fetchPendingCallbacks() ++ cs.callbacks.Close() ++ cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } + } + +-func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { ++func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { +- case b := <-t.callbacks.Get(): ++ case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) +- t.callbacks.Load() ++ cs.callbacks.Load() + default: + return backlog + } + } + } ++ ++// Done returns a channel that is closed after the context passed to ++// NewCallbackSerializer is canceled and all callbacks have been executed. ++func (cs *CallbackSerializer) Done() <-chan struct{} { ++ return cs.done ++} +diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +new file mode 100644 +index 00000000000..aef8cec1ab0 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +@@ -0,0 +1,121 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpcsync ++ ++import ( ++ "context" ++ "sync" ++) ++ ++// Subscriber represents an entity that is subscribed to messages published on ++// a PubSub. 
It wraps the callback to be invoked by the PubSub when a new ++// message is published. ++type Subscriber interface { ++ // OnMessage is invoked when a new message is published. Implementations ++ // must not block in this method. ++ OnMessage(msg any) ++} ++ ++// PubSub is a simple one-to-many publish-subscribe system that supports ++// messages of arbitrary type. It guarantees that messages are delivered in ++// the same order in which they were published. ++// ++// Publisher invokes the Publish() method to publish new messages, while ++// subscribers interested in receiving these messages register a callback ++// via the Subscribe() method. ++// ++// Once a PubSub is stopped, no more messages can be published, but any pending ++// published messages will be delivered to the subscribers. Done may be used ++// to determine when all published messages have been delivered. ++type PubSub struct { ++ cs *CallbackSerializer ++ ++ // Access to the below fields are guarded by this mutex. ++ mu sync.Mutex ++ msg any ++ subscribers map[Subscriber]bool ++} ++ ++// NewPubSub returns a new PubSub instance. Users should cancel the ++// provided context to shutdown the PubSub. ++func NewPubSub(ctx context.Context) *PubSub { ++ return &PubSub{ ++ cs: NewCallbackSerializer(ctx), ++ subscribers: map[Subscriber]bool{}, ++ } ++} ++ ++// Subscribe registers the provided Subscriber to the PubSub. ++// ++// If the PubSub contains a previously published message, the Subscriber's ++// OnMessage() callback will be invoked asynchronously with the existing ++// message to begin with, and subsequently for every newly published message. ++// ++// The caller is responsible for invoking the returned cancel function to ++// unsubscribe itself from the PubSub. ++func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ ++ ps.subscribers[sub] = true ++ ++ if ps.msg != nil { ++ msg := ps.msg ++ ps.cs.Schedule(func(context.Context) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ if !ps.subscribers[sub] { ++ return ++ } ++ sub.OnMessage(msg) ++ }) ++ } ++ ++ return func() { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ delete(ps.subscribers, sub) ++ } ++} ++ ++// Publish publishes the provided message to the PubSub, and invokes ++// callbacks registered by subscribers asynchronously. ++func (ps *PubSub) Publish(msg any) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ ++ ps.msg = msg ++ for sub := range ps.subscribers { ++ s := sub ++ ps.cs.Schedule(func(context.Context) { ++ ps.mu.Lock() ++ defer ps.mu.Unlock() ++ if !ps.subscribers[s] { ++ return ++ } ++ s.OnMessage(msg) ++ }) ++ } ++} ++ ++// Done returns a channel that is closed after the context passed to NewPubSub ++// is canceled and all updates have been sent to subscribers. ++func (ps *PubSub) Done() <-chan struct{} { ++ return ps.cs.Done() ++} +diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go +similarity index 61% +rename from vendor/google.golang.org/grpc/idle.go +rename to vendor/google.golang.org/grpc/internal/idle/idle.go +index dc3dc72f6b0..6c272476e5e 100644 +--- a/vendor/google.golang.org/grpc/idle.go ++++ b/vendor/google.golang.org/grpc/internal/idle/idle.go +@@ -16,7 +16,9 @@ + * + */ + +-package grpc ++// Package idle contains a component for managing idleness (entering and exiting) ++// based on RPC activity. 
++package idle + + import ( + "fmt" +@@ -24,6 +26,8 @@ import ( + "sync" + "sync/atomic" + "time" ++ ++ "google.golang.org/grpc/grpclog" + ) + + // For overriding in unit tests. +@@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) + } + +-// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter ++// Enforcer is the functionality provided by grpc.ClientConn to enter + // and exit from idle mode. +-type idlenessEnforcer interface { +- exitIdleMode() error +- enterIdleMode() error ++type Enforcer interface { ++ ExitIdleMode() error ++ EnterIdleMode() error + } + +-// idlenessManager defines the functionality required to track RPC activity on a ++// Manager defines the functionality required to track RPC activity on a + // channel. +-type idlenessManager interface { +- onCallBegin() error +- onCallEnd() +- close() ++type Manager interface { ++ OnCallBegin() error ++ OnCallEnd() ++ Close() + } + +-type noopIdlenessManager struct{} ++type noopManager struct{} + +-func (noopIdlenessManager) onCallBegin() error { return nil } +-func (noopIdlenessManager) onCallEnd() {} +-func (noopIdlenessManager) close() {} ++func (noopManager) OnCallBegin() error { return nil } ++func (noopManager) OnCallEnd() {} ++func (noopManager) Close() {} + +-// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +-// operations to synchronize access to shared state and a mutex to guarantee +-// mutual exclusion in a critical section. +-type idlenessManagerImpl struct { ++// manager implements the Manager interface. It uses atomic operations to ++// synchronize access to shared state and a mutex to guarantee mutual exclusion ++// in a critical section. ++type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. +@@ -64,14 +68,15 @@ type idlenessManagerImpl struct { + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. +- enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. +- timeout int64 // Idle timeout duration nanos stored as an int64. ++ enforcer Enforcer // Functionality provided by grpc.ClientConn. ++ timeout int64 // Idle timeout duration nanos stored as an int64. ++ logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. +- // - b: At the same time an RPC is made on the channel, and onCallBegin() ++ // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at +@@ -83,28 +88,37 @@ type idlenessManagerImpl struct { + timer *time.Timer + } + +-// newIdlenessManager creates a new idleness manager implementation for the ++// ManagerOptions is a collection of options used by ++// NewManager. ++type ManagerOptions struct { ++ Enforcer Enforcer ++ Timeout time.Duration ++ Logger grpclog.LoggerV2 ++} ++ ++// NewManager creates a new idleness manager implementation for the + // given idle timeout. 
+-func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { +- if idleTimeout == 0 { +- return noopIdlenessManager{} ++func NewManager(opts ManagerOptions) Manager { ++ if opts.Timeout == 0 { ++ return noopManager{} + } + +- i := &idlenessManagerImpl{ +- enforcer: enforcer, +- timeout: int64(idleTimeout), ++ m := &manager{ ++ enforcer: opts.Enforcer, ++ timeout: int64(opts.Timeout), ++ logger: opts.Logger, + } +- i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) +- return i ++ m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) ++ return m + } + + // resetIdleTimer resets the idle timer to the given duration. This method + // should only be called from the timer callback. +-func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) resetIdleTimer(d time.Duration) { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if i.timer == nil { ++ if m.timer == nil { + // Only close sets timer to nil. We are done. + return + } +@@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. +- i.timer.Reset(d) ++ m.timer.Reset(d) + } + + // handleIdleTimeout is the timer callback that is invoked upon expiry of the + // configured idle timeout. The channel is considered inactive if there are no + // ongoing calls and no RPC activity since the last time the timer fired. +-func (i *idlenessManagerImpl) handleIdleTimeout() { +- if i.isClosed() { ++func (m *manager) handleIdleTimeout() { ++ if m.isClosed() { + return + } + +- if atomic.LoadInt32(&i.activeCallsCount) > 0 { +- i.resetIdleTimer(time.Duration(i.timeout)) ++ if atomic.LoadInt32(&m.activeCallsCount) > 0 { ++ m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. +- if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) +- i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) ++ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the +- // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the ++ // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the + // channel is either in idle mode or is trying to get there. +- if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { ++ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. 
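Illustrative sketch (not part of the vendored diff): the hunks above extract the channel idleness logic into the internal idle package, exposing Enforcer, Manager, and NewManager(ManagerOptions). Because the package lives under google.golang.org/grpc/internal it is only importable from inside the gRPC module; the enforcer type and helper functions below are hypothetical and exist purely to show the calling convention.

package idleexample

import (
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/idle"
)

// loggingEnforcer is a stand-in Enforcer; in real use the Enforcer is the
// grpc.ClientConn, which releases and rebuilds its resolver/balancer state
// when entering and exiting idle mode.
type loggingEnforcer struct{ logger grpclog.LoggerV2 }

func (e *loggingEnforcer) EnterIdleMode() error { e.logger.Info("entering idle"); return nil }
func (e *loggingEnforcer) ExitIdleMode() error  { e.logger.Info("exiting idle"); return nil }

// newIdleTracking builds a Manager; per the code above, a zero Timeout yields
// a no-op Manager that never moves the channel to idle.
func newIdleTracking(logger grpclog.LoggerV2, timeout time.Duration) idle.Manager {
	return idle.NewManager(idle.ManagerOptions{
		Enforcer: &loggingEnforcer{logger: logger},
		Timeout:  timeout,
		Logger:   logger,
	})
}

// trackCall shows the per-RPC convention: OnCallBegin before the call (it can
// fail if the channel cannot exit idle mode), OnCallEnd when the call is done.
func trackCall(m idle.Manager, do func() error) error {
	if err := m.OnCallBegin(); err != nil {
		return err
	}
	defer m.OnCallEnd()
	return do()
}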
+- i.resetIdleTimer(time.Duration(i.timeout)) ++ m.resetIdleTimer(time.Duration(m.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. +- if i.tryEnterIdleMode() { ++ if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } +@@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. +- atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) +- i.resetIdleTimer(time.Duration(i.timeout)) ++ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) ++ m.resetIdleTimer(time.Duration(m.timeout)) + } + + // tryEnterIdleMode instructs the channel to enter idle mode. But before +@@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { + // Return value indicates whether or not the channel moved to idle mode. + // + // Holds idleMu which ensures mutual exclusion with exitIdleMode. +-func (i *idlenessManagerImpl) tryEnterIdleMode() bool { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) tryEnterIdleMode() bool { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { ++ if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } +- if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { ++ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. +@@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. +- if err := i.enforcer.enterIdleMode(); err != nil { +- logger.Errorf("Failed to enter idle mode: %v", err) ++ if err := m.enforcer.EnterIdleMode(); err != nil { ++ m.logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. +- i.actuallyIdle = true ++ m.actuallyIdle = true + return true + } + +-// onCallBegin is invoked at the start of every RPC. +-func (i *idlenessManagerImpl) onCallBegin() error { +- if i.isClosed() { ++// OnCallBegin is invoked at the start of every RPC. ++func (m *manager) OnCallBegin() error { ++ if m.isClosed() { + return nil + } + +- if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { ++ if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. +- if err := i.exitIdleMode(); err != nil { ++ if err := m.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. 
+- atomic.AddInt32(&i.activeCallsCount, -1) ++ atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + +- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) ++ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // exitIdleMode instructs the channel to exit idle mode. + // + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +-func (i *idlenessManagerImpl) exitIdleMode() error { +- i.idleMu.Lock() +- defer i.idleMu.Unlock() ++func (m *manager) exitIdleMode() error { ++ m.idleMu.Lock() ++ defer m.idleMu.Unlock() + +- if !i.actuallyIdle { ++ if !m.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC +- // came in and onCallBegin() noticed that the calls count is negative. ++ // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same +- // time, all of them notice a negative calls count in onCallBegin and get ++ // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + +- if err := i.enforcer.exitIdleMode(); err != nil { ++ if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. +- atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) +- i.actuallyIdle = false ++ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) ++ m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. +- i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) ++ m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + return nil + } + +-// onCallEnd is invoked at the end of every RPC. +-func (i *idlenessManagerImpl) onCallEnd() { +- if i.isClosed() { ++// OnCallEnd is invoked at the end of every RPC. ++func (m *manager) OnCallEnd() { ++ if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. +- atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) ++ atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+- atomic.AddInt32(&i.activeCallsCount, -1) ++ atomic.AddInt32(&m.activeCallsCount, -1) + } + +-func (i *idlenessManagerImpl) isClosed() bool { +- return atomic.LoadInt32(&i.closed) == 1 ++func (m *manager) isClosed() bool { ++ return atomic.LoadInt32(&m.closed) == 1 + } + +-func (i *idlenessManagerImpl) close() { +- atomic.StoreInt32(&i.closed, 1) ++func (m *manager) Close() { ++ atomic.StoreInt32(&m.closed, 1) + +- i.idleMu.Lock() +- i.timer.Stop() +- i.timer = nil +- i.idleMu.Unlock() ++ m.idleMu.Lock() ++ m.timer.Stop() ++ m.timer = nil ++ m.idleMu.Unlock() + } +diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go +index 42ff39c8444..0d94c63e06e 100644 +--- a/vendor/google.golang.org/grpc/internal/internal.go ++++ b/vendor/google.golang.org/grpc/internal/internal.go +@@ -30,7 +30,7 @@ import ( + + var ( + // WithHealthCheckFunc is set by dialoptions.go +- WithHealthCheckFunc interface{} // func (HealthChecker) DialOption ++ WithHealthCheckFunc any // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. +@@ -38,8 +38,12 @@ var ( + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second ++ // KeepaliveMinServerPingTime is the minimum ping interval for servers. ++ // This must be 1s by default, but tests may wish to set it lower for ++ // convenience. ++ KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. +- ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult ++ ParseServiceConfig any // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the +@@ -49,33 +53,33 @@ var ( + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. +- GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder ++ GetCertificateProviderBuilder any // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. +- GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo ++ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. +- GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials ++ GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+- CanonicalString interface{} // func (codes.Code) string ++ CanonicalString any // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. +- DrainServerTransports interface{} // func(*grpc.Server, string) ++ DrainServerTransports any // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- AddGlobalServerOptions interface{} // func(opt ...ServerOption) ++ AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // +@@ -88,14 +92,14 @@ var ( + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- AddGlobalDialOptions interface{} // func(opt ...DialOption) ++ AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- DisableGlobalDialOptions interface{} // func() grpc.DialOption ++ DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // +@@ -104,23 +108,26 @@ var ( + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. +- JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption ++ JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. +- JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption ++ JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. +- WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption ++ WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+- BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption ++ BinaryLogger any // func(binarylog.Logger) grpc.ServerOption ++ ++ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn ++ SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from +@@ -131,7 +138,7 @@ var ( + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. +- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) ++ NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment +@@ -163,7 +170,17 @@ var ( + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. +- ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ++ ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) ++ ++ // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra ++ // metadata to RPCs. ++ GRPCResolverSchemeExtraMetadata string = "xds" ++ ++ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. ++ EnterIdleModeForTesting any // func(*grpc.ClientConn) error ++ ++ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ++ ExitIdleModeForTesting any // func(*grpc.ClientConn) error + ) + + // HealthChecker defines the signature of the client-side LB channel health checking function. +@@ -174,7 +191,7 @@ var ( + // + // The health checking protocol is defined at: + // https://github.com/grpc/grpc/blob/master/doc/health-checking.md +-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error ++type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error + + const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. +diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +index c82e608e077..900bfb71608 100644 +--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go ++++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go +@@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") + + type mdValue metadata.MD + +-func (m mdValue) Equal(o interface{}) bool { ++func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false +diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +index 0177af4b511..7033191375d 100644 +--- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go ++++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go +@@ -35,7 +35,7 @@ const jsonIndent = " " + // ToJSON marshals the input into a json string. + // + // If marshal fails, it falls back to fmt.Sprintf("%+v"). 
+-func ToJSON(e interface{}) string { ++func ToJSON(e any) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} +diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +index c7a18a948ad..f0603871c93 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +@@ -92,7 +92,7 @@ type ClientStream interface { + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC +@@ -101,7 +101,7 @@ type ClientStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // ClientInterceptor is an interceptor for gRPC client streams. +diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +index 09a667f33cb..99e1e5b36c8 100644 +--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go ++++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +@@ -62,7 +62,8 @@ const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" +- // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. ++ // txtPrefix is the prefix string to be prepended to the host name for txt ++ // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. +@@ -86,14 +87,14 @@ var ( + minDNSResRate = 30 * time.Second + ) + +-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { +- return func(ctx context.Context, network, address string) (net.Conn, error) { ++var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { ++ return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer +- return dialer.DialContext(ctx, network, authority) ++ return dialer.DialContext(ctx, network, address) + } + } + +-var customAuthorityResolver = func(authority string) (netResolver, error) { ++var newNetResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err +@@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { + + return &net.Resolver{ + PreferGo: true, +- Dial: customAuthorityDialler(authorityWithPort), ++ Dial: addressDialer(authorityWithPort), + }, nil + } + +@@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { + + type dnsBuilder struct{} + +-// Build creates and starts a DNS resolver that watches the name resolution of the target. 
++// Build creates and starts a DNS resolver that watches the name resolution of ++// the target. + func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { +@@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts + if target.URL.Host == "" { + d.resolver = defaultResolver + } else { +- d.resolver, err = customAuthorityResolver(target.URL.Host) ++ d.resolver, err = newNetResolver(target.URL.Host) + if err != nil { + return nil, err + } +@@ -180,19 +182,22 @@ type dnsResolver struct { + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn +- // rn channel is used by ResolveNow() to force an immediate resolution of the target. ++ // rn channel is used by ResolveNow() to force an immediate resolution of the ++ // target. + rn chan struct{} +- // wg is used to enforce Close() to return after the watcher() goroutine has finished. +- // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we +- // replace the real lookup functions with mocked ones to facilitate testing. +- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes +- // will warns lookup (READ the lookup function pointers) inside watcher() goroutine +- // has data race with replaceNetFunc (WRITE the lookup function pointers). ++ // wg is used to enforce Close() to return after the watcher() goroutine has ++ // finished. Otherwise, data race will be possible. [Race Example] in ++ // dns_resolver_test we replace the real lookup functions with mocked ones to ++ // facilitate testing. If Close() doesn't wait for watcher() goroutine ++ // finishes, race detector sometimes will warns lookup (READ the lookup ++ // function pointers) inside watcher() goroutine has data race with ++ // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool + } + +-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. ++// ResolveNow invoke an immediate resolution of the target that this ++// dnsResolver watches. + func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: +@@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { + + var timer *time.Timer + if err == nil { +- // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least +- // to prevent constantly re-resolving. ++ // Success resolving, wait for the next ResolveNow. However, also wait 30 ++ // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { +@@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { + case <-d.rn: + } + } else { +- // Poll on an error found in DNS Resolver or an error received from ClientConn. ++ // Poll on an error found in DNS Resolver or an error received from ++ // ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } +@@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + } + + func handleDNSError(err error, lookupType string) error { +- if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { ++ dnsErr, ok := err.(*net.DNSError) ++ if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). +@@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + res += s + } + +- // TXT record must have "grpc_config=" attribute in order to be used as service config. ++ // TXT record must have "grpc_config=" attribute in order to be used as ++ // service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) +- // This is not an error; it is the equivalent of not having a service config. ++ // This is not an error; it is the equivalent of not having a service ++ // config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) +@@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { + return &state, nil + } + +-// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +-// If addr is an IPv4 address, return the addr and ok = true. +-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. ++// formatIP returns ok = false if addr is not a valid textual representation of ++// an IP address. If addr is an IPv4 address, return the addr and ok = true. ++// If addr is an IPv6 address, return the addr enclosed in square brackets and ++// ok = true. + func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { +@@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { + return "[" + addr + "]", true + } + +-// parseTarget takes the user input target string and default port, returns formatted host and port info. +-// If target doesn't specify a port, set the port to be the defaultPort. +-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +-// are stripped when setting the host. ++// parseTarget takes the user input target string and default port, returns ++// formatted host and port info. If target doesn't specify a port, set the port ++// to be the defaultPort. If target is in IPv6 format and host-name is enclosed ++// in square brackets, brackets are stripped when setting the host. + // examples: + // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" + // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +@@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { +- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. ++ // If the port field is empty (target ends with colon), e.g. "[::1]:", ++ // this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { +- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
++ // Keep consistent with net.Dial(): If the host is empty, as in ":80", ++ // the local system is assumed. + host = "localhost" + } + return host, port, nil +diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go +index b0ead4f54f8..03ef2fedd5c 100644 +--- a/vendor/google.golang.org/grpc/internal/status/status.go ++++ b/vendor/google.golang.org/grpc/internal/status/status.go +@@ -43,13 +43,41 @@ type Status struct { + s *spb.Status + } + ++// NewWithProto returns a new status including details from statusProto. This ++// is meant to be used by the gRPC library only. ++func NewWithProto(code codes.Code, message string, statusProto []string) *Status { ++ if len(statusProto) != 1 { ++ // No grpc-status-details bin header, or multiple; just ignore. ++ return &Status{s: &spb.Status{Code: int32(code), Message: message}} ++ } ++ st := &spb.Status{} ++ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { ++ // Probably not a google.rpc.Status proto; do not provide details. ++ return &Status{s: &spb.Status{Code: int32(code), Message: message}} ++ } ++ if st.Code == int32(code) { ++ // The codes match between the grpc-status header and the ++ // grpc-status-details-bin header; use the full details proto. ++ return &Status{s: st} ++ } ++ return &Status{ ++ s: &spb.Status{ ++ Code: int32(codes.Internal), ++ Message: fmt.Sprintf( ++ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", ++ code, message, st, ++ ), ++ }, ++ } ++} ++ + // New returns a Status representing c and msg. + func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + } + + // Newf returns New(c, fmt.Sprintf(format, a...)). +-func Newf(c codes.Code, format string, a ...interface{}) *Status { ++func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) + } + +@@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { + } + + // Errorf returns Error(c, fmt.Sprintf(format, a...)). +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) + } + +@@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + + // Details returns a slice of details messages attached to the status. + // If a detail cannot be decoded, the error is returned in place of the detail. 
+-func (s *Status) Details() []interface{} { ++func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } +- details := make([]interface{}, 0, len(s.s.Details)) ++ details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { +diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +index be5a9c81eb9..b330ccedc8a 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go ++++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +@@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + } + + type itemNode struct { +- it interface{} ++ it any + next *itemNode + } + +@@ -49,7 +49,7 @@ type itemList struct { + tail *itemNode + } + +-func (il *itemList) enqueue(i interface{}) { ++func (il *itemList) enqueue(i any) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n +@@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { + + // peek returns the first item in the list without removing it from the + // list. +-func (il *itemList) peek() interface{} { ++func (il *itemList) peek() any { + return il.head.it + } + +-func (il *itemList) dequeue() interface{} { ++func (il *itemList) dequeue() any { + if il.head == nil { + return nil + } +@@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { + return err + } + +-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { ++func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { +@@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b + } + + // Note argument f should never be nil. +-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { ++func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() +@@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo + return true, nil + } + +-func (c *controlBuffer) get(block bool) (interface{}, error) { ++func (c *controlBuffer) get(block bool) (any, error) { + for { + c.mu.Lock() + if c.err != nil { +@@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { + return nil + } + +-func (l *loopyWriter) handle(i interface{}) error { ++func (l *loopyWriter) handle(i any) error { + switch i := i.(type) { + case *incomingWindowUpdate: + l.incomingWindowUpdateHandler(i) +diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +index 98f80e3fa00..17f7a21b5a9 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go +@@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + ++ s.hdrMu.Lock() + if p := st.Proto(); p != nil && len(p.Details) > 0 { ++ delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ panic(err) + } + +- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) ++ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + } + +- if md := s.Trailer(); len(md) > 0 { +- for k, vv := range md { ++ if len(s.trailer) > 0 { ++ for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue +@@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro + } + } + } ++ s.hdrMu.Unlock() + }) + + if err == nil { // transport has not been closed +@@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + } + + // writeCustomHeaders sets custom headers set on the stream via SetHeader +-// on the first write call (Write, WriteHeader, or WriteStatus). ++// on the first write call (Write, WriteHeader, or WriteStatus) + func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + +@@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return err + } + +-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +index 326bf084800..d6f5c49358b 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go +@@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), +- framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), ++ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), +@@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + firstTry := true + var ch chan struct{} + transportDrainRequired := false +- checkForStreamQuota := func(it interface{}) bool { ++ checkForStreamQuota := func(it any) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ +@@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + return true + } + var hdrListSizeErr error +- checkForHeaderListSize := func(it interface{}) bool { ++ checkForHeaderListSize := func(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } +@@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, + return true + } + for { +- success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { ++ success, err := t.controlBuf.executeAndPut(func(it any) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { +@@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
+ rst: rst, + rstCode: rstCode, + } +- addBackStreamQuota := func(interface{}) bool { ++ addBackStreamQuota := func(any) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { +@@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { + // for the transport and the stream based on the current bdp + // estimation. + func (t *http2Client) updateFlowControl(n uint32) { +- updateIWS := func(interface{}) bool { ++ updateIWS := func(any) bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { +@@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } +- t.controlBuf.executeAndPut(func(interface{}) bool { ++ t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } +@@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string +- statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string +@@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) +- case "grpc-status-details-bin": +- var err error +- statusGen, err = decodeGRPCStatusDetails(hf.Value) +- if err != nil { +- headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) +- } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" +@@ -1505,14 +1498,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + return + } + +- isHeader := false +- +- // If headerChan hasn't been closed yet +- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { +- s.headerValid = true +- if !endStream { +- // HEADERS frame block carries a Response-Headers. +- isHeader = true ++ // For headers, set them in s.header and close headerChan. For trailers or ++ // trailers-only, closeStream will set the trailers and close headerChan as ++ // needed. ++ if !endStream { ++ // If headerChan hasn't been closed yet (expected, given we checked it ++ // above, but something else could have potentially closed the whole ++ // stream). ++ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { ++ s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. +@@ -1520,15 +1514,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + if len(mdata) > 0 { + s.header = mdata + } +- } else { +- // HEADERS frame block carries a Trailers-Only. 
+- s.noHeaders = true ++ close(s.headerChan) + } +- close(s.headerChan) + } + + for _, sh := range t.statsHandlers { +- if isHeader { ++ if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), +@@ -1550,13 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + return + } + +- if statusGen == nil { +- statusGen = status.New(rawStatusCode, grpcMessage) +- } ++ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + +- // if client received END_STREAM from server while stream was still active, send RST_STREAM +- rst := s.getState() == streamActive +- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) ++ // If client received END_STREAM from server while stream was still active, ++ // send RST_STREAM. ++ rstStream := s.getState() == streamActive ++ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) + } + + // readServerPreface reads and handles the initial settings frame from the +diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +index ec4eef21342..6fa1eb41992 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go +@@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } +- framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) ++ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, +@@ -233,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { +- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { ++ if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } +@@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, + + // operateHeaders takes action on the decoded headers. Returns an error if fatal + // error encountered and transport needs to close, otherwise returns nil. 
+-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { ++func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() +@@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + } + if t.inTapHandle != nil { + var err error +- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { ++ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) +@@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } +- s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ +@@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( + // HandleStreams receives incoming streams using the given handler. This is + // typically run in a separate goroutine. + // traceCtx attaches trace to ctx and returns the new context. +-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { ++func (t *http2Server) HandleStreams(handle func(*Stream)) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() +@@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: +- if err := t.operateHeaders(frame, handle, traceCtx); err != nil { ++ if err := t.operateHeaders(frame, handle); err != nil { + t.Close(err) + break + } +@@ -850,7 +849,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + } + return nil + }) +- t.controlBuf.executeAndPut(func(interface{}) bool { ++ t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } +@@ -934,7 +933,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) + return headerFields + } + +-func (t *http2Server) checkForHeaderListSize(it interface{}) bool { ++func (t *http2Server) checkForHeaderListSize(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } +@@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { ++ // Do not use the user's grpc-status-details-bin (if present) if we are ++ // even attempting to set our own. ++ delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + } else { +- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) ++ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) + } + } + +diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go +index 19cbb18f5ab..dc29d590e91 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go ++++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go +@@ -30,15 +30,13 @@ import ( + "net/url" + "strconv" + "strings" ++ "sync" + "time" + "unicode/utf8" + +- "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +- spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +- "google.golang.org/grpc/status" + ) + + const ( +@@ -87,6 +85,8 @@ var ( + } + ) + ++var grpcStatusDetailsBinHeader = "grpc-status-details-bin" ++ + // isReservedHeader checks whether hdr belongs to HTTP2 headers + // reserved by gRPC protocol. Any other headers are classified as the + // user-specified metadata. +@@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { + "grpc-message", + "grpc-status", + "grpc-timeout", +- "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. +@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { + return v, nil + } + +-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { +- v, err := decodeBinHeader(rawDetails) +- if err != nil { +- return nil, err +- } +- st := &spb.Status{} +- if err = proto.Unmarshal(v, st); err != nil { +- return nil, err +- } +- return status.FromProto(st), nil +-} +- + type timeoutUnit uint8 + + const ( +@@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { + } + + type bufWriter struct { ++ pool *sync.Pool + buf []byte + offset int + batchSize int +@@ -316,12 +304,17 @@ type bufWriter struct { + err error + } + +-func newBufWriter(conn net.Conn, batchSize int) *bufWriter { +- return &bufWriter{ +- buf: make([]byte, batchSize*2), ++func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { ++ w := &bufWriter{ + batchSize: batchSize, + conn: conn, ++ pool: pool, ++ } ++ // this indicates that we should use non shared buf ++ if pool == nil { ++ w.buf = make([]byte, batchSize) + } ++ return w + } + + func (w *bufWriter) Write(b []byte) (n int, err error) { +@@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { + n, err = w.conn.Write(b) + return n, toIOError(err) + } ++ if w.buf == nil { ++ b := w.pool.Get().(*[]byte) ++ w.buf = *b ++ } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { +- err = w.Flush() ++ err = w.flushKeepBuffer() + } + } + return n, err + } + + func (w *bufWriter) Flush() error { ++ err := w.flushKeepBuffer() ++ // Only release the buffer if we are in a "shared" mode ++ if w.buf != nil && w.pool != nil { ++ b := w.buf ++ w.pool.Put(&b) ++ w.buf = nil ++ } ++ return err ++} ++ ++func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } +@@ -381,7 +389,10 @@ type framer struct { + fr *http2.Framer + } + +-func 
newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { ++var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) ++var writeBufferMutex sync.Mutex ++ ++func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } +@@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } +- w := newBufWriter(conn, writeBufferSize) ++ var pool *sync.Pool ++ if sharedWriteBuffer { ++ pool = getWriteBufferPool(writeBufferSize) ++ } ++ w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), +@@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList + return f + } + ++func getWriteBufferPool(writeBufferSize int) *sync.Pool { ++ writeBufferMutex.Lock() ++ defer writeBufferMutex.Unlock() ++ size := writeBufferSize * 2 ++ pool, ok := writeBufferPoolMap[size] ++ if ok { ++ return pool ++ } ++ pool = &sync.Pool{ ++ New: func() any { ++ b := make([]byte, size) ++ return &b ++ }, ++ } ++ writeBufferPoolMap[size] = pool ++ return pool ++} ++ + // parseDialTarget returns the network and address to pass to dialer. + func parseDialTarget(target string) (string, string) { + net := "tcp" +diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go +index aa1c896595d..aac056e723b 100644 +--- a/vendor/google.golang.org/grpc/internal/transport/transport.go ++++ b/vendor/google.golang.org/grpc/internal/transport/transport.go +@@ -43,10 +43,6 @@ import ( + "google.golang.org/grpc/tap" + ) + +-// ErrNoHeaders is used as a signal that a trailers only response was received, +-// and is not a real error. +-var ErrNoHeaders = errors.New("stream has no headers") +- + const logLevel = 2 + + type bufferPool struct { +@@ -56,7 +52,7 @@ type bufferPool struct { + func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ +- New: func() interface{} { ++ New: func() any { + return new(bytes.Buffer) + }, + }, +@@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { + } + s.waitOnHeader() + +- if !s.headerValid { ++ if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + +- if s.noHeaders { +- return nil, ErrNoHeaders +- } +- + return s.header.Copy(), nil + } + +@@ -559,6 +551,7 @@ type ServerConfig struct { + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int ++ SharedWriteBuffer bool + ChannelzParentID *channelz.Identifier + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +@@ -592,6 +585,8 @@ type ConnectOptions struct { + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int ++ // SharedWriteBuffer indicates whether connections should reuse write buffer ++ SharedWriteBuffer bool + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID *channelz.Identifier + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. +@@ -703,7 +698,7 @@ type ClientTransport interface { + // Write methods for a given Stream will be called serially. 
+ type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. +- HandleStreams(func(*Stream), func(context.Context, string) context.Context) ++ HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. +@@ -736,7 +731,7 @@ type ServerTransport interface { + } + + // connectionErrorf creates an ConnectionError with the specified error description. +-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { ++func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, +diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go +index 02f97595124..236837f4157 100644 +--- a/vendor/google.golang.org/grpc/picker_wrapper.go ++++ b/vendor/google.golang.org/grpc/picker_wrapper.go +@@ -28,21 +28,26 @@ import ( + "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" ++ "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + ) + + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick + // actions and unblock when there's a picker update. + type pickerWrapper struct { +- mu sync.Mutex +- done bool +- idle bool +- blockingCh chan struct{} +- picker balancer.Picker ++ mu sync.Mutex ++ done bool ++ idle bool ++ blockingCh chan struct{} ++ picker balancer.Picker ++ statsHandlers []stats.Handler // to record blocking picker calls + } + +-func newPickerWrapper() *pickerWrapper { +- return &pickerWrapper{blockingCh: make(chan struct{})} ++func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { ++ return &pickerWrapper{ ++ blockingCh: make(chan struct{}), ++ statsHandlers: statsHandlers, ++ } + } + + // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +@@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + var ch chan struct{} + + var lastPickErr error ++ + for { + pw.mu.Lock() + if pw.done { +@@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. + continue + } + ++ // If the channel is set, it means that the pick call had to wait for a ++ // new picker at some point. Either it's the first iteration and this ++ // function received the first picker, or a picker errored with ++ // ErrNoSubConnAvailable or errored with failfast set to false, which ++ // will trigger a continue to the next iteration. In the first case this ++ // conditional will hit if this call had to block (the channel is set). ++ // In the second case, the only way it will get to this conditional is ++ // if there is a new picker. 
++ if ch != nil { ++ for _, sh := range pw.statsHandlers { ++ sh.HandleRPC(ctx, &stats.PickerUpdated{}) ++ } ++ } ++ + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() +diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go +index abe266b021d..2e9cf66b4af 100644 +--- a/vendor/google.golang.org/grpc/pickfirst.go ++++ b/vendor/google.golang.org/grpc/pickfirst.go +@@ -26,12 +26,18 @@ import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" ++ internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" ++ "google.golang.org/grpc/internal/pretty" ++ "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + ) + +-// PickFirstBalancerName is the name of the pick_first balancer. +-const PickFirstBalancerName = "pick_first" ++const ( ++ // PickFirstBalancerName is the name of the pick_first balancer. ++ PickFirstBalancerName = "pick_first" ++ logPrefix = "[pick-first-lb %p] " ++) + + func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +@@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { + type pickfirstBuilder struct{} + + func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +- return &pickfirstBalancer{cc: cc} ++ b := &pickfirstBalancer{cc: cc} ++ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) ++ return b + } + + func (*pickfirstBuilder) Name() string { +@@ -57,23 +65,36 @@ type pfConfig struct { + } + + func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +- cfg := &pfConfig{} +- if err := json.Unmarshal(js, cfg); err != nil { ++ if !envconfig.PickFirstLBConfig { ++ // Prior to supporting loadbalancing configuration, the pick_first LB ++ // policy did not implement the balancer.ConfigParser interface. This ++ // meant that if a non-empty configuration was passed to it, the service ++ // config unmarshaling code would throw a warning log, but would ++ // continue using the pick_first LB policy. The code below ensures the ++ // same behavior is retained if the env var is not set. ++ if string(js) != "{}" { ++ logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) ++ } ++ return nil, nil ++ } ++ ++ var cfg pfConfig ++ if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil + } + + type pickfirstBalancer struct { ++ logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +- cfg *pfConfig + } + + func (b *pickfirstBalancer) ResolverError(err error) { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) ++ if b.logger.V(2) { ++ b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure +@@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { +- // Remove the old subConn. All addresses were removed, so it is no longer +- // valid. +- b.cc.RemoveSubConn(b.subConn) ++ // Shut down the old subConn. 
All addresses were removed, so it is ++ // no longer valid. ++ b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + +- if state.BalancerConfig != nil { +- cfg, ok := state.BalancerConfig.(*pfConfig) +- if !ok { +- return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) +- } +- b.cfg = cfg ++ // We don't have to guard this block with the env var because ParseConfig ++ // already does so. ++ cfg, ok := state.BalancerConfig.(pfConfig) ++ if state.BalancerConfig != nil && !ok { ++ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } +- +- if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { ++ if cfg.ShuffleAddressList { ++ addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } ++ ++ if b.logger.V(2) { ++ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) ++ } ++ + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + +- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) ++ var subConn balancer.SubConn ++ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ ++ StateListener: func(state balancer.SubConnState) { ++ b.updateSubConnState(subConn, state) ++ }, ++ }) + if err != nil { +- if logger.V(2) { +- logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) ++ if b.logger.V(2) { ++ b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ +@@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState + return nil + } + ++// UpdateSubConnState is unused as a StateListener is always registered when ++// creating SubConns. + func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) ++ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) ++} ++ ++func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { ++ if b.logger.V(2) { ++ b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { +- if logger.V(2) { +- logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") ++ if b.logger.V(2) { ++ b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } +diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go +index cd45547854f..73bd6336433 100644 +--- a/vendor/google.golang.org/grpc/preloader.go ++++ b/vendor/google.golang.org/grpc/preloader.go +@@ -37,7 +37,7 @@ type PreparedMsg struct { + } + + // Encode marshalls and compresses the message using the codec and compressor for the stream. 
+-func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { ++func (p *PreparedMsg) Encode(s Stream, msg any) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { +diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go +index f27978e1281..0a4262342f3 100644 +--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go ++++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go +@@ -26,13 +26,16 @@ import ( + "google.golang.org/grpc/resolver" + ) + +-// NewBuilderWithScheme creates a new test resolver builder with the given scheme. ++// NewBuilderWithScheme creates a new manual resolver builder with the given ++// scheme. Every instance of the manual resolver may only ever be used with a ++// single grpc.ClientConn. Otherwise, bad things will happen. + func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ +- BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, +- ResolveNowCallback: func(resolver.ResolveNowOptions) {}, +- CloseCallback: func() {}, +- scheme: scheme, ++ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ++ UpdateStateCallback: func(error) {}, ++ ResolveNowCallback: func(resolver.ResolveNowOptions) {}, ++ CloseCallback: func() {}, ++ scheme: scheme, + } + } + +@@ -42,6 +45,11 @@ type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) ++ // UpdateStateCallback is called when the UpdateState method is called on ++ // the resolver. The value passed as argument to this callback is the value ++ // returned by the resolver.ClientConn. Must not be nil. Must not be ++ // changed after the resolver may be built. ++ UpdateStateCallback func(err error) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. +@@ -52,30 +60,34 @@ type Resolver struct { + scheme string + + // Fields actually belong to the resolver. +- mu sync.Mutex // Guards access to CC. +- CC resolver.ClientConn +- bootstrapState *resolver.State ++ // Guards access to below fields. ++ mu sync.Mutex ++ CC resolver.ClientConn ++ // Storing the most recent state update makes this resolver resilient to ++ // restarts, which is possible with channel idleness. ++ lastSeenState *resolver.State + } + + // InitialState adds initial state to the resolver so that UpdateState doesn't + // need to be explicitly called after Dial. + func (r *Resolver) InitialState(s resolver.State) { +- r.bootstrapState = &s ++ r.lastSeenState = &s + } + + // Build returns itself for Resolver, because it's both a builder and a resolver. + func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { ++ r.BuildCallback(target, cc, opts) + r.mu.Lock() + r.CC = cc +- r.mu.Unlock() +- r.BuildCallback(target, cc, opts) +- if r.bootstrapState != nil { +- r.UpdateState(*r.bootstrapState) ++ if r.lastSeenState != nil { ++ err := r.CC.UpdateState(*r.lastSeenState) ++ go r.UpdateStateCallback(err) + } ++ r.mu.Unlock() + return r, nil + } + +-// Scheme returns the test scheme. ++// Scheme returns the manual resolver's scheme. 
+ func (r *Resolver) Scheme() string { + return r.scheme + } +@@ -93,8 +105,10 @@ func (r *Resolver) Close() { + // UpdateState calls CC.UpdateState. + func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() +- r.CC.UpdateState(s) ++ err := r.CC.UpdateState(s) ++ r.lastSeenState = &s + r.mu.Unlock() ++ r.UpdateStateCallback(err) + } + + // ReportError calls CC.ReportError. +diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go +index efcb7f3efd8..804be887de0 100644 +--- a/vendor/google.golang.org/grpc/resolver/map.go ++++ b/vendor/google.golang.org/grpc/resolver/map.go +@@ -20,7 +20,7 @@ package resolver + + type addressMapEntry struct { + addr Address +- value interface{} ++ value any + } + + // AddressMap is a map of addresses to arbitrary values taking into account +@@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { + } + + // Get returns the value for the address in the map, if present. +-func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { ++func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { +@@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + } + + // Set updates or adds the value to the address in the map. +-func (a *AddressMap) Set(addr Address, value interface{}) { ++func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { +@@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { + } + + // Values returns a slice of all current map values. +-func (a *AddressMap) Values() []interface{} { +- ret := make([]interface{}, 0, a.Len()) ++func (a *AddressMap) Values() []any { ++ ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) +diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go +index 353c10b69a5..11384e228e5 100644 +--- a/vendor/google.golang.org/grpc/resolver/resolver.go ++++ b/vendor/google.golang.org/grpc/resolver/resolver.go +@@ -77,25 +77,6 @@ func GetDefaultScheme() string { + return defaultScheme + } + +-// AddressType indicates the address type returned by name resolution. +-// +-// Deprecated: use Attributes in Address instead. +-type AddressType uint8 +- +-const ( +- // Backend indicates the address is for a backend server. +- // +- // Deprecated: use Attributes in Address instead. +- Backend AddressType = iota +- // GRPCLB indicates the address is for a grpclb load balancer. +- // +- // Deprecated: to select the GRPCLB load balancing policy, use a service +- // config with a corresponding loadBalancingConfig. To supply balancer +- // addresses to the GRPCLB load balancing policy, set State.Attributes +- // using balancer/grpclb/state.Set. +- GRPCLB +-) +- + // Address represents a server the client connects to. + // + // # Experimental +@@ -111,9 +92,6 @@ type Address struct { + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // +- // If Type is GRPCLB, ServerName should be the name of the remote load +- // balancer, not the name of the backend. +- // + // WARNING: ServerName must only be populated with trusted values. 
It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. +@@ -126,27 +104,29 @@ type Address struct { + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. +- BalancerAttributes *attributes.Attributes +- +- // Type is the type of this address. + // +- // Deprecated: use Attributes instead. +- Type AddressType ++ // Deprecated: when an Address is inside an Endpoint, this field should not ++ // be used, and it will eventually be removed entirely. ++ BalancerAttributes *attributes.Attributes + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. +- Metadata interface{} ++ Metadata any + } + + // Equal returns whether a and o are identical. Metadata is compared directly, + // not with any recursive introspection. ++// ++// This method compares all fields of the address. When used to tell apart ++// addresses during subchannel creation or connection establishment, it might be ++// more appropriate for the caller to implement custom equality logic. + func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && +- a.Type == o.Type && a.Metadata == o.Metadata ++ a.Metadata == o.Metadata + } + + // String returns JSON formatted string representation of the address. +@@ -190,11 +170,37 @@ type BuildOptions struct { + Dialer func(context.Context, string) (net.Conn, error) + } + ++// An Endpoint is one network endpoint, or server, which may have multiple ++// addresses with which it can be accessed. ++type Endpoint struct { ++ // Addresses contains a list of addresses used to access this endpoint. ++ Addresses []Address ++ ++ // Attributes contains arbitrary data about this endpoint intended for ++ // consumption by the LB policy. ++ Attributes *attributes.Attributes ++} ++ + // State contains the current Resolver state relevant to the ClientConn. + type State struct { + // Addresses is the latest set of resolved addresses for the target. ++ // ++ // If a resolver sets Addresses but does not set Endpoints, one Endpoint ++ // will be created for each Address before the State is passed to the LB ++ // policy. The BalancerAttributes of each entry in Addresses will be set ++ // in Endpoints.Attributes, and be cleared in the Endpoint's Address's ++ // BalancerAttributes. ++ // ++ // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + ++ // Endpoints is the latest set of resolved endpoints for the target. ++ // ++ // If a resolver produces a State containing Endpoints but not Addresses, ++ // it must take care to ensure the LB policies it selects will support ++ // Endpoints. ++ Endpoints []Endpoint ++ + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. +@@ -254,20 +260,7 @@ type ClientConn interface { + // target does not contain a scheme or if the parsed scheme is not registered + // (i.e. no corresponding resolver available to resolve the endpoint), we will + // apply the default scheme, and will attempt to reparse it. 
+-// +-// Examples: +-// +-// - "dns://some_authority/foo.bar" +-// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +-// - "foo.bar" +-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +-// - "unknown_scheme://authority/endpoint" +-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} + type Target struct { +- // Deprecated: use URL.Scheme instead. +- Scheme string +- // Deprecated: use URL.Host instead. +- Authority string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial +@@ -321,10 +314,3 @@ type Resolver interface { + // Close closes the resolver. + Close() + } +- +-// UnregisterForTesting removes the resolver builder with the given scheme from the +-// resolver map. +-// This function is for testing only. +-func UnregisterForTesting(scheme string) { +- delete(m, scheme) +-} +diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +index b408b3688f2..d6833056084 100644 +--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go ++++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +@@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. +- <-ccr.serializer.Done ++ <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. +@@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) + // which includes addresses and service config. + func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + errCh := make(chan error, 1) ++ if s.Endpoints == nil { ++ s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) ++ for _, a := range s.Addresses { ++ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} ++ ep.Addresses[0].BalancerAttributes = nil ++ s.Endpoints = append(s.Endpoints, ep) ++ } ++ } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s +diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go +index 2030736a306..b7723aa09cb 100644 +--- a/vendor/google.golang.org/grpc/rpc_util.go ++++ b/vendor/google.golang.org/grpc/rpc_util.go +@@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + } + return &gzipCompressor{ + pool: sync.Pool{ +- New: func() interface{} { ++ New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) +@@ -577,6 +577,9 @@ type parser struct { + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte ++ ++ // recvBufferPool is the pool of shared receive buffers. ++ recvBufferPool SharedBufferPool + } + + // recvMsg reads a complete gRPC message from the stream. +@@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } +- // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead +- // of making it for each message: +- msg = make([]byte, int(length)) ++ msg = p.recvBufferPool.Get(int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF +@@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt + // encode serializes msg and returns a buffer containing the message, or an + // error if it is too large to be transmitted by grpc. If msg is nil, it + // generates an empty message. +-func encode(c baseCodec, msg interface{}) ([]byte, error) { ++func encode(c baseCodec, msg any) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } +@@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + return hdr, data + } + +-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { ++func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, +@@ -726,12 +727,12 @@ type payloadInfo struct { + } + + func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { +- pf, d, err := p.recvMsg(maxReceiveMessageSize) ++ pf, buf, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { +- payInfo.compressedLength = len(d) ++ payInfo.compressedLength = len(buf) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { +@@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { +- d, err = dc.Do(bytes.NewReader(d)) +- size = len(d) ++ buf, err = dc.Do(bytes.NewReader(buf)) ++ size = len(buf) + } else { +- d, size, err = decompress(compressor, d, maxReceiveMessageSize) ++ buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) +@@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } +- return d, nil ++ return buf, nil + } + + // Using compressor, decompress d, returning data and size. +@@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize + // For the two compressor parameters, both should not be set, but if they are, + // dc takes precedence over compressor. + // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
+-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { +- d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) ++func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { ++ buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } +- if err := c.Unmarshal(d, m); err != nil { ++ if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + if payInfo != nil { +- payInfo.uncompressedBytes = d ++ payInfo.uncompressedBytes = buf ++ } else { ++ p.recvBufferPool.Put(&buf) + } + return nil + } +@@ -860,19 +863,22 @@ func ErrorDesc(err error) string { + // Errorf returns nil if c is OK. + // + // Deprecated: use status.Errorf instead. +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) + } + ++var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) ++var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) ++ + // toRPCErr converts an error into an error from the status package. + func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: +- return status.Error(codes.DeadlineExceeded, err.Error()) ++ return errContextDeadline + case context.Canceled: +- return status.Error(codes.Canceled, err.Error()) ++ return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } +diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go +index 8869cc906f2..8f60d421437 100644 +--- a/vendor/google.golang.org/grpc/server.go ++++ b/vendor/google.golang.org/grpc/server.go +@@ -86,7 +86,7 @@ func init() { + var statusOK = status.New(codes.OK, "") + var logger = grpclog.Component("core") + +-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) ++type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + + // MethodDesc represents an RPC service's method specification. + type MethodDesc struct { +@@ -99,20 +99,20 @@ type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. +- HandlerType interface{} ++ HandlerType any + Methods []MethodDesc + Streams []StreamDesc +- Metadata interface{} ++ Metadata any + } + + // serviceInfo wraps information about a service. It is very similar to + // ServiceDesc and is constructed from it for internal purposes. + type serviceInfo struct { + // Contains the implementation for the methods in this service. +- serviceImpl interface{} ++ serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc +- mdata interface{} ++ mdata any + } + + // Server is a gRPC server to serve RPC requests. 
+@@ -164,10 +164,12 @@ type serverOptions struct { + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int ++ sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 ++ recvBufferPool SharedBufferPool + } + + var defaultServerOptions = serverOptions{ +@@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{ + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, ++ recvBufferPool: nopBufferPool{}, + } + var globalServerOptions []ServerOption + +@@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} + } + ++// SharedWriteBuffer allows reusing per-connection transport write buffer. ++// If this option is set to true every connection will release the buffer after ++// flushing the data on the wire. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func SharedWriteBuffer(val bool) ServerOption { ++ return newFuncServerOption(func(o *serverOptions) { ++ o.sharedWriteBuffer = val ++ }) ++} ++ + // WriteBufferSize determines how much data can be batched before doing a write + // on the wire. The corresponding memory allocation for this buffer will be + // twice the size to keep syscalls low. The default value for this buffer is +@@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { + + // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. + func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { +- if kp.Time > 0 && kp.Time < time.Second { ++ if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") +- kp.Time = time.Second ++ kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { +@@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { + }) + } + ++// RecvBufferPool returns a ServerOption that configures the server ++// to use the provided shared buffer pool for parsing incoming messages. Depending ++// on the application's workload, this could result in reduced memory allocation. ++// ++// If you are unsure about how to implement a memory pool but want to utilize one, ++// begin with grpc.NewSharedBufferPool. ++// ++// Note: The shared buffer pool feature will not be active if any of the following ++// options are used: StatsHandler, EnableTracing, or binary logging. In such ++// cases, the shared buffer pool will be ignored. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { ++ return newFuncServerOption(func(o *serverOptions) { ++ o.recvBufferPool = bufferPool ++ }) ++} ++ + // serverWorkerResetThreshold defines how often the stack must be reset. Every + // N requests, by spawning a new goroutine in its place, a worker can reset its + // stack so that large stacks don't live in memory forever. 2^16 should allow +@@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { + + // printf records an event in s's event log, unless s has been stopped. + // REQUIRES s.mu is held. 
+-func (s *Server) printf(format string, a ...interface{}) { ++func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +@@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { + + // errorf records an error in s's event log, unless s has been stopped. + // REQUIRES s.mu is held. +-func (s *Server) errorf(format string, a ...interface{}) { ++func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +@@ -648,14 +686,14 @@ type ServiceRegistrar interface { + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. +- RegisterService(desc *ServiceDesc, impl interface{}) ++ RegisterService(desc *ServiceDesc, impl any) + } + + // RegisterService registers a service and its implementation to the gRPC + // server. It is called from the IDL generated code. This must be called before + // invoking Serve. If ss is non-nil (for legacy code), its type is checked to + // ensure it implements sd.HandlerType. +-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ++func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) +@@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + s.register(sd, ss) + } + +-func (s *Server) register(sd *ServiceDesc, ss interface{}) { ++func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) +@@ -707,7 +745,7 @@ type MethodInfo struct { + type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. +- Metadata interface{} ++ Metadata any + } + + // GetServiceInfo returns a map from service names to ServiceInfo. +@@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, ++ SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, +@@ -944,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + f := func() { + defer streamQuota.release() + defer wg.Done() +- s.handleStream(st, stream, s.traceInfo(st, stream)) ++ s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { +@@ -956,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { + } + } + go f() +- }, func(ctx context.Context, method string) context.Context { +- if !EnableTracing { +- return ctx +- } +- tr := trace.New("grpc.Recv."+methodFamily(method), method) +- return trace.NewContext(ctx, tr) + }) + wg.Wait() + } +@@ -1010,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.serveStreams(st) + } + +-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +-// If tracing is not enabled, it returns nil. 
+-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { +- if !EnableTracing { +- return nil +- } +- tr, ok := trace.FromContext(stream.Context()) +- if !ok { +- return nil +- } +- +- trInfo = &traceInfo{ +- tr: tr, +- firstLine: firstLine{ +- client: false, +- remoteAddr: st.RemoteAddr(), +- }, +- } +- if dl, ok := stream.Context().Deadline(); ok { +- trInfo.firstLine.deadline = time.Until(dl) +- } +- return trInfo +-} +- + func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() +@@ -1094,7 +1103,7 @@ func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) + } + +-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { ++func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) +@@ -1113,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { +- sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) ++ sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + } + } + return err +@@ -1141,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) { + } + + func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { +- return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { ++ return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } +@@ -1150,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info + if curr == len(interceptors)-1 { + return finalHandler + } +- return func(ctx context.Context, req interface{}) (interface{}, error) { ++ return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } + } + +-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { +@@ -1169,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + IsClientStream: false, + IsServerStream: false, + } +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) +@@ -1187,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() +@@ -1201,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { +@@ -1223,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + } + if len(binlogs) != 0 { +- ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, +@@ -1294,7 +1302,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } +- d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) ++ d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) +@@ -1304,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if channelz.IsOn() { + t.IncrMsgRecv() + } +- df := func(v interface{}) error { ++ df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), &stats.InPayload{ ++ sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: len(d), +@@ -1323,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Message: d, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), cm) ++ binlog.Log(ctx, cm) + } + } + if trInfo != nil { +@@ -1331,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + } + return nil + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) +@@ -1356,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Header: h, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) ++ binlog.Log(ctx, sh) + } + } + st := &binarylog.ServerTrailer{ +@@ -1364,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return appErr +@@ -1379,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } +- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { ++ if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err +@@ -1406,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, sh) ++ binlog.Log(ctx, st) + } + } + return err +@@ -1421,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Message: reply, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), sh) +- binlog.Log(stream.Context(), sm) ++ binlog.Log(ctx, sh) ++ binlog.Log(ctx, sm) + } + } + if channelz.IsOn() { +@@ -1440,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. + Err: appErr, + } + for _, binlog := range binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return t.WriteStatus(stream, statusOK) +@@ -1468,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) { + } + + func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { +- return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { ++ return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } +@@ -1477,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf + if curr == len(interceptors)-1 { + return finalHandler + } +- return func(srv interface{}, stream ServerStream) error { ++ return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } + } + +-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { ++func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } +@@ -1496,15 +1504,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), statsBegin) ++ sh.HandleRPC(ctx, statsBegin) + } + } +- ctx := NewContextWithServerTransportStream(stream.Context(), stream) ++ ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, +- p: &parser{r: stream}, ++ p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, +@@ -1518,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() +@@ -1535,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + end.Error = toRPCErr(err) + } + for _, sh := range shs { +- sh.HandleRPC(stream.Context(), end) ++ sh.HandleRPC(ctx, end) + } + } + +@@ -1577,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + 
logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), logEntry) ++ binlog.Log(ctx, logEntry) + } + } + +@@ -1621,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error +- var server interface{} ++ var server any + if info != nil { + server = info.serviceImpl + } +@@ -1655,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + Err: appErr, + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + t.WriteStatus(ss.s, appStatus) +@@ -1673,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp + Err: appErr, + } + for _, binlog := range ss.binlogs { +- binlog.Log(stream.Context(), st) ++ binlog.Log(ctx, st) + } + } + return t.WriteStatus(ss.s, statusOK) + } + +-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { ++func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ++ ctx := stream.Context() ++ var ti *traceInfo ++ if EnableTracing { ++ tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) ++ ctx = trace.NewContext(ctx, tr) ++ ti = &traceInfo{ ++ tr: tr, ++ firstLine: firstLine{ ++ client: false, ++ remoteAddr: t.RemoteAddr(), ++ }, ++ } ++ if dl, ok := ctx.Deadline(); ok { ++ ti.firstLine.deadline = time.Until(dl) ++ } ++ } ++ + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) ++ ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + return + } +@@ -1709,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { +- s.processUnaryRPC(t, stream, srv, md, trInfo) ++ s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { +- s.processStreamingRPC(t, stream, srv, sd, trInfo) ++ s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { +- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) ++ s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string +@@ -1728,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } +- if trInfo != nil { +- trInfo.tr.LazyPrintf("%s", errDesc) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyPrintf("%s", errDesc) ++ ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +- if trInfo != nil { +- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) +- trInfo.tr.SetError() ++ if ti != nil { ++ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ++ ti.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } +- if trInfo != nil { +- trInfo.tr.Finish() ++ if ti != nil { ++ ti.tr.Finish() + } + } + +@@ -2054,12 +2079,12 @@ func validateSendCompressor(name, clientCompressors string) error { + // atomicSemaphore implements a blocking, counting semaphore. acquire should be + // called synchronously; release may be called asynchronously. + type atomicSemaphore struct { +- n int64 ++ n atomic.Int64 + wait chan struct{} + } + + func (q *atomicSemaphore) acquire() { +- if atomic.AddInt64(&q.n, -1) < 0 { ++ if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +@@ -2070,12 +2095,14 @@ func (q *atomicSemaphore) release() { + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. +- if atomic.AddInt64(&q.n, 1) <= 0 { ++ if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } + } + + func newHandlerQuota(n uint32) *atomicSemaphore { +- return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} ++ a := &atomicSemaphore{wait: make(chan struct{}, 1)} ++ a.n.Store(int64(n)) ++ return a + } +diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go +new file mode 100644 +index 00000000000..48a64cfe8e2 +--- /dev/null ++++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go +@@ -0,0 +1,154 @@ ++/* ++ * ++ * Copyright 2023 gRPC authors. ++ * ++ * Licensed under the Apache License, Version 2.0 (the "License"); ++ * you may not use this file except in compliance with the License. ++ * You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++package grpc ++ ++import "sync" ++ ++// SharedBufferPool is a pool of buffers that can be shared, resulting in ++// decreased memory allocation. Currently, in gRPC-go, it is only utilized ++// for parsing incoming messages. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. 
++type SharedBufferPool interface { ++ // Get returns a buffer with specified length from the pool. ++ // ++ // The returned byte slice may be not zero initialized. ++ Get(length int) []byte ++ ++ // Put returns a buffer to the pool. ++ Put(*[]byte) ++} ++ ++// NewSharedBufferPool creates a simple SharedBufferPool with buckets ++// of different sizes to optimize memory usage. This prevents the pool from ++// wasting large amounts of memory, even when handling messages of varying sizes. ++// ++// # Experimental ++// ++// Notice: This API is EXPERIMENTAL and may be changed or removed in a ++// later release. ++func NewSharedBufferPool() SharedBufferPool { ++ return &simpleSharedBufferPool{ ++ pools: [poolArraySize]simpleSharedBufferChildPool{ ++ newBytesPool(level0PoolMaxSize), ++ newBytesPool(level1PoolMaxSize), ++ newBytesPool(level2PoolMaxSize), ++ newBytesPool(level3PoolMaxSize), ++ newBytesPool(level4PoolMaxSize), ++ newBytesPool(0), ++ }, ++ } ++} ++ ++// simpleSharedBufferPool is a simple implementation of SharedBufferPool. ++type simpleSharedBufferPool struct { ++ pools [poolArraySize]simpleSharedBufferChildPool ++} ++ ++func (p *simpleSharedBufferPool) Get(size int) []byte { ++ return p.pools[p.poolIdx(size)].Get(size) ++} ++ ++func (p *simpleSharedBufferPool) Put(bs *[]byte) { ++ p.pools[p.poolIdx(cap(*bs))].Put(bs) ++} ++ ++func (p *simpleSharedBufferPool) poolIdx(size int) int { ++ switch { ++ case size <= level0PoolMaxSize: ++ return level0PoolIdx ++ case size <= level1PoolMaxSize: ++ return level1PoolIdx ++ case size <= level2PoolMaxSize: ++ return level2PoolIdx ++ case size <= level3PoolMaxSize: ++ return level3PoolIdx ++ case size <= level4PoolMaxSize: ++ return level4PoolIdx ++ default: ++ return levelMaxPoolIdx ++ } ++} ++ ++const ( ++ level0PoolMaxSize = 16 // 16 B ++ level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B ++ level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB ++ level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB ++ level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB ++) ++ ++const ( ++ level0PoolIdx = iota ++ level1PoolIdx ++ level2PoolIdx ++ level3PoolIdx ++ level4PoolIdx ++ levelMaxPoolIdx ++ poolArraySize ++) ++ ++type simpleSharedBufferChildPool interface { ++ Get(size int) []byte ++ Put(any) ++} ++ ++type bufferPool struct { ++ sync.Pool ++ ++ defaultSize int ++} ++ ++func (p *bufferPool) Get(size int) []byte { ++ bs := p.Pool.Get().(*[]byte) ++ ++ if cap(*bs) < size { ++ p.Pool.Put(bs) ++ ++ return make([]byte, size) ++ } ++ ++ return (*bs)[:size] ++} ++ ++func newBytesPool(size int) simpleSharedBufferChildPool { ++ return &bufferPool{ ++ Pool: sync.Pool{ ++ New: func() any { ++ bs := make([]byte, size) ++ return &bs ++ }, ++ }, ++ defaultSize: size, ++ } ++} ++ ++// nopBufferPool is a buffer pool just makes new buffer without pooling. ++type nopBufferPool struct { ++} ++ ++func (nopBufferPool) Get(length int) []byte { ++ return make([]byte, length) ++} ++ ++func (nopBufferPool) Put(*[]byte) { ++} +diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go +index 7a552a9b787..4ab70e2d462 100644 +--- a/vendor/google.golang.org/grpc/stats/stats.go ++++ b/vendor/google.golang.org/grpc/stats/stats.go +@@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } + + func (s *Begin) isRPCStats() {} + ++// PickerUpdated indicates that the LB policy provided a new picker while the ++// RPC was waiting for one. 
++type PickerUpdated struct{} ++ ++// IsClient indicates if the stats information is from client side. Only Client ++// Side interfaces with a Picker, thus always returns true. ++func (*PickerUpdated) IsClient() bool { return true } ++ ++func (*PickerUpdated) isRPCStats() {} ++ + // InPayload contains the information for an incoming payload. + type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. +- Payload interface{} ++ Payload any + // Data is the serialized message payload. + Data []byte + +@@ -134,7 +144,7 @@ type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. +- Payload interface{} ++ Payload any + // Data is the serialized message payload. + Data []byte + // Length is the size of the uncompressed payload data. Does not include any +diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go +index bcf2e4d81be..a93360efb84 100644 +--- a/vendor/google.golang.org/grpc/status/status.go ++++ b/vendor/google.golang.org/grpc/status/status.go +@@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { + } + + // Newf returns New(c, fmt.Sprintf(format, a...)). +-func Newf(c codes.Code, format string, a ...interface{}) *Status { ++func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) + } + +@@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { + } + + // Errorf returns Error(c, fmt.Sprintf(format, a...)). +-func Errorf(c codes.Code, format string, a ...interface{}) error { ++func Errorf(c codes.Code, format string, a ...any) error { + return Error(c, fmt.Sprintf(format, a...)) + } + +@@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { + } + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { +- if gs.GRPCStatus() == nil { ++ grpcStatus := gs.GRPCStatus() ++ if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } +- return gs.GRPCStatus(), true ++ return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { +- if gs.GRPCStatus() == nil { ++ grpcStatus := gs.GRPCStatus() ++ if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } +- p := gs.GRPCStatus().Proto() ++ p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true + } +diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go +index 10092685b22..b14b2fbea2e 100644 +--- a/vendor/google.golang.org/grpc/stream.go ++++ b/vendor/google.golang.org/grpc/stream.go +@@ -31,6 +31,7 @@ import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" ++ "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" +@@ -54,7 +55,7 @@ import ( + // status package, or be one of the context errors. 
Otherwise, gRPC will use + // codes.Unknown as the status code and err.Error() as the status message of the + // RPC. +-type StreamHandler func(srv interface{}, stream ServerStream) error ++type StreamHandler func(srv any, stream ServerStream) error + + // StreamDesc represents a streaming RPC service's method specification. Used + // on the server when registering services and on the client when initiating +@@ -79,9 +80,9 @@ type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // ClientStream defines the client-side behavior of a streaming RPC. +@@ -90,7 +91,9 @@ type Stream interface { + // status package. + type ClientStream interface { + // Header returns the header metadata received from the server if there +- // is any. It blocks if the metadata is not ready to read. ++ // is any. It blocks if the metadata is not ready to read. If the metadata ++ // is nil and the error is also nil, then the stream was terminated without ++ // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or +@@ -126,7 +129,7 @@ type ClientStream interface { + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. +- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC +@@ -135,7 +138,7 @@ type ClientStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // NewStream creates a new Stream for the client side. This is typically +@@ -155,11 +158,6 @@ type ClientStream interface { + // If none of the above happen, a goroutine and a context will be leaked, and grpc + // will not call the optionally-configured stats handler with a stats.End message. + func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { +- if err := cc.idlenessMgr.onCallBegin(); err != nil { +- return nil, err +- } +- defer cc.idlenessMgr.onCallEnd() +- + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) +@@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth + } + + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { ++ // Start tracking the RPC for idleness purposes. This is where a stream is ++ // created for both streaming and unary RPCs, and hence is a good place to ++ // track active RPC count. 
++ if err := cc.idlenessMgr.OnCallBegin(); err != nil { ++ return nil, err ++ } ++ // Add a calloption, to decrement the active call count, that gets executed ++ // when the RPC completes. ++ opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) ++ + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { +@@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) + ctx = trace.NewContext(ctx, trInfo.tr) + } + +- if cs.cc.parsedTarget.URL.Scheme == "xds" { ++ if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( +@@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error { + return toRPCErr(nse.Err) + } + a.s = s +- a.p = &parser{r: s} ++ a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + return nil + } + +@@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) + + func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD +- noHeader := false + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() +- if err == transport.ErrNoHeaders { +- noHeader = true +- return nil +- } + return toRPCErr(err) + }, cs.commitAttemptLocked) + ++ if m == nil && err == nil { ++ // The stream ended with success. Finish the clientStream. ++ err = io.EOF ++ } ++ + if err != nil { + cs.finish(err) +- return nil, err ++ // Do not return the error. The user should get it by calling Recv(). ++ return nil, nil + } + +- if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { ++ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. + logEntry := &binarylog.ServerHeader{ +@@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { + binlog.Log(cs.ctx, logEntry) + } + } ++ + return m, nil + } + +@@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error + cs.buffer = append(cs.buffer, op) + } + +-func (cs *clientStream) SendMsg(m interface{}) (err error) { ++func (cs *clientStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg +@@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { + return err + } + +-func (cs *clientStream) RecvMsg(m interface{}) error { ++func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() +@@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) +- +- if len(cs.binlogs) != 0 { +- // finish will not log Trailer. Log Trailer here. 
+- logEntry := &binarylog.ServerTrailer{ +- OnClientSide: true, +- Trailer: cs.Trailer(), +- Err: err, +- } +- if logEntry.Err == io.EOF { +- logEntry.Err = nil +- } +- if peer, ok := peer.FromContext(cs.Context()); ok { +- logEntry.PeerAddr = peer.Addr +- } +- for _, binlog := range cs.binlogs { +- binlog.Log(cs.ctx, logEntry) +- } +- } + } + return err + } +@@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { + } + } + } ++ + cs.mu.Unlock() +- // For binary logging. only log cancel in finish (could be caused by RPC ctx +- // canceled or ClientConn closed). Trailer will be logged in RecvMsg. +- // +- // Only one of cancel or trailer needs to be logged. In the cases where +- // users don't call RecvMsg, users must have already canceled the RPC. +- if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { +- c := &binarylog.Cancel{ +- OnClientSide: true, +- } +- for _, binlog := range cs.binlogs { +- binlog.Log(cs.ctx, c) ++ // Only one of cancel or trailer needs to be logged. ++ if len(cs.binlogs) != 0 { ++ switch err { ++ case errContextCanceled, errContextDeadline, ErrClientConnClosing: ++ c := &binarylog.Cancel{ ++ OnClientSide: true, ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, c) ++ } ++ default: ++ logEntry := &binarylog.ServerTrailer{ ++ OnClientSide: true, ++ Trailer: cs.Trailer(), ++ Err: err, ++ } ++ if peer, ok := peer.FromContext(cs.Context()); ok { ++ logEntry.PeerAddr = peer.Addr ++ } ++ for _, binlog := range cs.binlogs { ++ binlog.Log(cs.ctx, logEntry) ++ } + } + } + if err == nil { +@@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { + cs.cancel() + } + +-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { ++func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() +@@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + return nil + } + +-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { ++func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} +@@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin + return nil, err + } + as.s = s +- as.p = &parser{r: s} ++ as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is +@@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { + return as.s.Context() + } + +-func (as *addrConnStream) SendMsg(m interface{}) (err error) { ++func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg +@@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { + return nil + } + +-func (as *addrConnStream) RecvMsg(m interface{}) (err error) { ++func (as *addrConnStream) RecvMsg(m any) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. +@@ -1512,7 +1516,7 @@ type ServerStream interface { + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. 
+- SendMsg(m interface{}) error ++ SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the +@@ -1521,7 +1525,7 @@ type ServerStream interface { + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. +- RecvMsg(m interface{}) error ++ RecvMsg(m any) error + } + + // serverStream implements a server side Stream. +@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { + ss.s.SetTrailer(md) + } + +-func (ss *serverStream) SendMsg(m interface{}) (err error) { ++func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() +@@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } +@@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { + return nil + } + +-func (ss *serverStream) RecvMsg(m interface{}) (err error) { ++func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() +@@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { +- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ++ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } +@@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { + // prepareMsg returns the hdr, payload and data + // using the compressors passed or using the + // passed preparedmsg +-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { ++func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } +diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go +index bfa5dfa40e4..07f01257688 100644 +--- a/vendor/google.golang.org/grpc/tap/tap.go ++++ b/vendor/google.golang.org/grpc/tap/tap.go +@@ -27,6 +27,8 @@ package tap + + import ( + "context" ++ ++ "google.golang.org/grpc/metadata" + ) + + // Info defines the relevant information needed by the handles. +@@ -34,6 +36,10 @@ type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string ++ ++ // Header contains the header metadata received. ++ Header metadata.MD ++ + // TODO: More to be added. + } + +diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go +index 07a2d26b3e7..9ded79321ba 100644 +--- a/vendor/google.golang.org/grpc/trace.go ++++ b/vendor/google.golang.org/grpc/trace.go +@@ -97,8 +97,8 @@ func truncate(x string, l int) string { + + // payload represents an RPC request or response payload. 
+ type payload struct { +- sent bool // whether this is an outgoing payload +- msg interface{} // e.g. a proto.Message ++ sent bool // whether this is an outgoing payload ++ msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? + } + +@@ -111,7 +111,7 @@ func (p payload) String() string { + + type fmtStringer struct { + format string +- a []interface{} ++ a []any + } + + func (f *fmtStringer) String() string { +diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go +index 3cc75406218..6d2cadd79a9 100644 +--- a/vendor/google.golang.org/grpc/version.go ++++ b/vendor/google.golang.org/grpc/version.go +@@ -19,4 +19,4 @@ + package grpc + + // Version is the current grpc version. +-const Version = "1.56.3" ++const Version = "1.59.0" +diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh +index a8e4732b3d2..bb480f1f9cc 100644 +--- a/vendor/google.golang.org/grpc/vet.sh ++++ b/vendor/google.golang.org/grpc/vet.sh +@@ -84,12 +84,18 @@ not git grep -l 'x/net/context' -- "*.go" + # thread safety. + git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + ++# - Do not use "interface{}"; use "any" instead. ++git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' ++ + # - Do not call grpclog directly. Use grpclog.Component instead. + git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + + # - Ensure all ptypes proto packages are renamed when importing. + not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + ++# - Ensure all usages of grpc_testing package are renamed when importing. ++not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" ++ + # - Ensure all xds proto imports are renamed to *pb or *grpc. + git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +@@ -106,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + +- go mod tidy -compat=1.17 ++ go mod tidy -compat=1.19 + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +@@ -168,8 +174,6 @@ proto.RegisteredExtension is deprecated + proto.RegisteredExtensions is deprecated + proto.RegisterMapType is deprecated + proto.Unmarshaler is deprecated +-resolver.Backend +-resolver.GRPCLB + Target is deprecated: Use the Target field in the BuildOptions instead. 
+ xxx_messageInfo_ + ' "${SC_OUT}" +diff --git a/vendor/modules.txt b/vendor/modules.txt +index bbf59f9b1fe..ebadd4f6b6b 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -1,7 +1,7 @@ + # bitbucket.org/bertimus9/systemstat v0.5.0 + ## explicit; go 1.17 + bitbucket.org/bertimus9/systemstat +-# cloud.google.com/go/compute v1.19.1 ++# cloud.google.com/go/compute v1.23.0 + ## explicit; go 1.19 + cloud.google.com/go/compute/internal + # cloud.google.com/go/compute/metadata v0.2.3 +@@ -114,8 +114,8 @@ github.com/beorn7/perks/quantile + # github.com/blang/semver/v4 v4.0.0 + ## explicit; go 1.14 + github.com/blang/semver/v4 +-# github.com/cenkalti/backoff/v4 v4.1.3 +-## explicit; go 1.13 ++# github.com/cenkalti/backoff/v4 v4.2.1 ++## explicit; go 1.18 + github.com/cenkalti/backoff/v4 + # github.com/cespare/xxhash/v2 v2.2.0 + ## explicit; go 1.11 +@@ -220,8 +220,8 @@ github.com/fvbommel/sortorder + # github.com/go-errors/errors v1.4.2 + ## explicit; go 1.14 + github.com/go-errors/errors +-# github.com/go-logr/logr v1.2.3 +-## explicit; go 1.16 ++# github.com/go-logr/logr v1.3.0 ++## explicit; go 1.18 + github.com/go-logr/logr + github.com/go-logr/logr/funcr + # github.com/go-logr/stdr v1.2.2 +@@ -378,7 +378,7 @@ github.com/google/gnostic/extensions + github.com/google/gnostic/jsonschema + github.com/google/gnostic/openapiv2 + github.com/google/gnostic/openapiv3 +-# github.com/google/go-cmp v0.5.9 ++# github.com/google/go-cmp v0.6.0 + ## explicit; go 1.13 + github.com/google/go-cmp/cmp + github.com/google/go-cmp/cmp/cmpopts +@@ -392,17 +392,39 @@ github.com/google/gofuzz + # github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 + ## explicit; go 1.14 + github.com/google/pprof/profile ++# github.com/google/s2a-go v0.1.4 ++## explicit; go 1.16 ++github.com/google/s2a-go ++github.com/google/s2a-go/fallback ++github.com/google/s2a-go/internal/authinfo ++github.com/google/s2a-go/internal/handshaker ++github.com/google/s2a-go/internal/handshaker/service ++github.com/google/s2a-go/internal/proto/common_go_proto ++github.com/google/s2a-go/internal/proto/s2a_context_go_proto ++github.com/google/s2a-go/internal/proto/s2a_go_proto ++github.com/google/s2a-go/internal/proto/v2/common_go_proto ++github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto ++github.com/google/s2a-go/internal/proto/v2/s2a_go_proto ++github.com/google/s2a-go/internal/record ++github.com/google/s2a-go/internal/record/internal/aeadcrypter ++github.com/google/s2a-go/internal/record/internal/halfconn ++github.com/google/s2a-go/internal/tokenmanager ++github.com/google/s2a-go/internal/v2 ++github.com/google/s2a-go/internal/v2/certverifier ++github.com/google/s2a-go/internal/v2/remotesigner ++github.com/google/s2a-go/internal/v2/tlsconfigstore ++github.com/google/s2a-go/stream + # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 + ## explicit; go 1.13 + github.com/google/shlex +-# github.com/google/uuid v1.3.0 ++# github.com/google/uuid v1.3.1 + ## explicit + github.com/google/uuid + # github.com/googleapis/enterprise-certificate-proxy v0.2.3 + ## explicit; go 1.19 + github.com/googleapis/enterprise-certificate-proxy/client + github.com/googleapis/enterprise-certificate-proxy/client/util +-# github.com/googleapis/gax-go/v2 v2.7.1 ++# github.com/googleapis/gax-go/v2 v2.11.0 + ## explicit; go 1.19 + github.com/googleapis/gax-go/v2 + github.com/googleapis/gax-go/v2/apierror +@@ -425,8 +447,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus + github.com/grpc-ecosystem/grpc-gateway/internal + 
github.com/grpc-ecosystem/grpc-gateway/runtime + github.com/grpc-ecosystem/grpc-gateway/utilities +-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 +-## explicit; go 1.14 ++# github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 ++## explicit; go 1.17 + github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule + github.com/grpc-ecosystem/grpc-gateway/v2/runtime + github.com/grpc-ecosystem/grpc-gateway/v2/utilities +@@ -658,8 +680,8 @@ github.com/spf13/pflag + # github.com/stoewer/go-strcase v1.2.0 + ## explicit; go 1.11 + github.com/stoewer/go-strcase +-# github.com/stretchr/testify v1.8.1 +-## explicit; go 1.13 ++# github.com/stretchr/testify v1.8.4 ++## explicit; go 1.20 + github.com/stretchr/testify/assert + github.com/stretchr/testify/require + # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 +@@ -849,63 +871,61 @@ go.opencensus.io/trace/tracestate + # go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 + ## explicit; go 1.17 + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful +-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 ++## explicit; go 1.20 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 +-## explicit; go 1.17 ++# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 ++## explicit; go 1.19 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +-# go.opentelemetry.io/otel v1.10.0 +-## explicit; go 1.17 ++go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil ++# go.opentelemetry.io/otel v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel + go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/baggage + go.opentelemetry.io/otel/codes +-go.opentelemetry.io/otel/exporters/otlp/internal +-go.opentelemetry.io/otel/exporters/otlp/internal/envconfig + go.opentelemetry.io/otel/internal ++go.opentelemetry.io/otel/internal/attribute + go.opentelemetry.io/otel/internal/baggage + go.opentelemetry.io/otel/internal/global + go.opentelemetry.io/otel/propagation + go.opentelemetry.io/otel/semconv/internal + go.opentelemetry.io/otel/semconv/v1.12.0 ++go.opentelemetry.io/otel/semconv/v1.17.0 ++go.opentelemetry.io/otel/semconv/v1.21.0 + go.opentelemetry.io/otel/semconv/v1.4.0 +-# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 +-## explicit; go 1.17 +-go.opentelemetry.io/otel/exporters/otlp/internal/retry +-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/exporters/otlp/otlptrace +-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig + go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform +-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc +-# go.opentelemetry.io/otel/metric v0.31.0 +-## explicit; go 1.17 ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal 
++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig ++go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry ++# go.opentelemetry.io/otel/metric v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/metric +-go.opentelemetry.io/otel/metric/global +-go.opentelemetry.io/otel/metric/instrument +-go.opentelemetry.io/otel/metric/instrument/asyncfloat64 +-go.opentelemetry.io/otel/metric/instrument/asyncint64 +-go.opentelemetry.io/otel/metric/instrument/syncfloat64 +-go.opentelemetry.io/otel/metric/instrument/syncint64 +-go.opentelemetry.io/otel/metric/internal/global +-go.opentelemetry.io/otel/metric/unit +-# go.opentelemetry.io/otel/sdk v1.10.0 +-## explicit; go 1.17 ++go.opentelemetry.io/otel/metric/embedded ++# go.opentelemetry.io/otel/sdk v1.20.0 ++## explicit; go 1.20 ++go.opentelemetry.io/otel/sdk + go.opentelemetry.io/otel/sdk/instrumentation + go.opentelemetry.io/otel/sdk/internal + go.opentelemetry.io/otel/sdk/internal/env + go.opentelemetry.io/otel/sdk/resource + go.opentelemetry.io/otel/sdk/trace + go.opentelemetry.io/otel/sdk/trace/tracetest +-# go.opentelemetry.io/otel/trace v1.10.0 +-## explicit; go 1.17 ++# go.opentelemetry.io/otel/trace v1.20.0 ++## explicit; go 1.20 + go.opentelemetry.io/otel/trace +-# go.opentelemetry.io/proto/otlp v0.19.0 +-## explicit; go 1.14 ++go.opentelemetry.io/otel/trace/embedded ++go.opentelemetry.io/otel/trace/noop ++# go.opentelemetry.io/proto/otlp v1.0.0 ++## explicit; go 1.17 + go.opentelemetry.io/proto/otlp/collector/trace/v1 + go.opentelemetry.io/proto/otlp/common/v1 + go.opentelemetry.io/proto/otlp/resource/v1 +@@ -944,11 +964,13 @@ go.uber.org/zap/zaptest + golang.org/x/crypto/bcrypt + golang.org/x/crypto/blowfish + golang.org/x/crypto/chacha20 ++golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/cryptobyte + golang.org/x/crypto/cryptobyte/asn1 + golang.org/x/crypto/curve25519 + golang.org/x/crypto/curve25519/internal/field + golang.org/x/crypto/ed25519 ++golang.org/x/crypto/hkdf + golang.org/x/crypto/internal/alias + golang.org/x/crypto/internal/poly1305 + golang.org/x/crypto/nacl/secretbox +@@ -979,8 +1001,8 @@ golang.org/x/net/internal/timeseries + golang.org/x/net/proxy + golang.org/x/net/trace + golang.org/x/net/websocket +-# golang.org/x/oauth2 v0.7.0 +-## explicit; go 1.17 ++# golang.org/x/oauth2 v0.11.0 ++## explicit; go 1.18 + golang.org/x/oauth2 + golang.org/x/oauth2/authhandler + golang.org/x/oauth2/google +@@ -1055,7 +1077,7 @@ golang.org/x/tools/internal/tokeninternal + golang.org/x/tools/internal/typeparams + golang.org/x/tools/internal/typesinternal + golang.org/x/tools/internal/versions +-# google.golang.org/api v0.114.0 ++# google.golang.org/api v0.126.0 + ## explicit; go 1.19 + google.golang.org/api/compute/v0.alpha + google.golang.org/api/compute/v0.beta +@@ -1084,24 +1106,27 @@ google.golang.org/appengine/internal/datastore + google.golang.org/appengine/internal/log + google.golang.org/appengine/internal/modules + google.golang.org/appengine/internal/remote_api ++google.golang.org/appengine/internal/socket + google.golang.org/appengine/internal/urlfetch ++google.golang.org/appengine/socket + google.golang.org/appengine/urlfetch +-# google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 ++# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 ++google.golang.org/genproto/internal + google.golang.org/genproto/protobuf/field_mask +-# 
google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a ++# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 + google.golang.org/genproto/googleapis/api + google.golang.org/genproto/googleapis/api/annotations + google.golang.org/genproto/googleapis/api/expr/v1alpha1 + google.golang.org/genproto/googleapis/api/httpbody +-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 ++# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d + ## explicit; go 1.19 + google.golang.org/genproto/googleapis/rpc/code + google.golang.org/genproto/googleapis/rpc/errdetails + google.golang.org/genproto/googleapis/rpc/status +-# google.golang.org/grpc v1.56.3 +-## explicit; go 1.17 ++# google.golang.org/grpc v1.59.0 ++## explicit; go 1.19 + google.golang.org/grpc + google.golang.org/grpc/attributes + google.golang.org/grpc/backoff +@@ -1134,6 +1159,7 @@ google.golang.org/grpc/internal/grpclog + google.golang.org/grpc/internal/grpcrand + google.golang.org/grpc/internal/grpcsync + google.golang.org/grpc/internal/grpcutil ++google.golang.org/grpc/internal/idle + google.golang.org/grpc/internal/metadata + google.golang.org/grpc/internal/pretty + google.golang.org/grpc/internal/resolver